blob: 693500e40c7aa2c0b038ed53e9228f95fa87859c [file] [log] [blame]
Marc Jones8ae8c882007-12-19 01:32:08 +00001/*
Stefan Reinauer7e61e452008-01-18 10:35:56 +00002 * This file is part of the coreboot project.
Marc Jones8ae8c882007-12-19 01:32:08 +00003 *
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00004 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
Marc Jones8ae8c882007-12-19 01:32:08 +00005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20/* Description: Main memory controller system configuration for DDR 2 */
21
22
23/* KNOWN ISSUES - ERRATA
24 *
25 * Trtp is not calculated correctly when the controller is in 64-bit mode, it
26 * is 1 busclock off. No fix planned. The controller is not ordinarily in
27 * 64-bit mode.
28 *
29 * 32 Byte burst not supported. No fix planned. The controller is not
30 * ordinarily in 64-bit mode.
31 *
32 * Trc precision does not use extra Jedec defined fractional component.
 * Instead, Trc (coarse) is rounded up to the nearest 1 ns.
34 *
35 * Mini and Micro DIMM not supported. Only RDIMM, UDIMM, SO-DIMM defined types
36 * supported.
37 */
38
39static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
40 struct DCTStatStruc *pDCTstatA);
41static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
42 struct DCTStatStruc *pDCTstatA);
43static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
44 struct DCTStatStruc *pDCTstatA);
45static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
46 struct DCTStatStruc *pDCTstatA);
47static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
48 struct DCTStatStruc *pDCTstatA);
49static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
50 struct DCTStatStruc *pDCTstatA);
51static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
52 struct DCTStatStruc *pDCTstat);
53static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
54 struct DCTStatStruc *pDCTstat);
55static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
56 struct DCTStatStruc *pDCTstatA);
57static u8 NodePresent_D(u8 Node);
58static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
59 struct DCTStatStruc *pDCTstatA);
60static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
61 struct DCTStatStruc *pDCTstat, u8 dct);
62static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
63 struct DCTStatStruc *pDCTstat, u8 dct);
64static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
65 struct DCTStatStruc *pDCTstat, u8 dct);
66static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
67 struct DCTStatStruc *pDCTstat);
68static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
69 struct DCTStatStruc *pDCTstat, u8 dct);
70static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
71 struct DCTStatStruc *pDCTstat, u8 dct);
72static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
73 struct DCTStatStruc *pDCTstat, u8 dct);
74static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
75 struct DCTStatStruc *pDCTstat, u8 dct);
76static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
77 struct DCTStatStruc *pDCTstat, u8 dct);
78static u8 Get_DefTrc_k_D(u8 k);
79static u16 Get_40Tk_D(u8 k);
80static u16 Get_Fk_D(u8 k);
81static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat, u8 i, u8 j, u8 k);
82static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
83 struct DCTStatStruc *pDCTstat, int j, int k);
84static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i);
85static void mct_initDCT(struct MCTStatStruc *pMCTstat,
86 struct DCTStatStruc *pDCTstat);
87static void mct_DramInit(struct MCTStatStruc *pMCTstat,
88 struct DCTStatStruc *pDCTstat, u8 dct);
89static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
90 struct DCTStatStruc *pDCTstat, u8 dct);
91static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat);
92static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
93 struct DCTStatStruc *pDCTstat, u8 dct);
94static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
95 struct DCTStatStruc *pDCTstat, u8 dct);
96static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,\
97 struct DCTStatStruc *pDCTstat, u8 dct);
98static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
99 struct DCTStatStruc *pDCTstat, u8 dct);
100static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
101 struct DCTStatStruc *pDCTstat, u8 dct);
102static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
103 struct DCTStatStruc *pDCTstat, u8 dct);
104static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
105 struct DCTStatStruc *pDCTstat, u8 dct);
106static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
107 struct DCTStatStruc *pDCTstat, u8 dct);
108static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
109 struct DCTStatStruc *pDCTstat, u8 dct);
110static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
111 struct DCTStatStruc *pDCTstat);
112static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
113 u32 dev, u32 index_reg, u32 index);
114static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
115 u32 dev, u32 index_reg);
116static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
117 u32 dev, u32 index_reg);
118static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
119 u32 dev, u32 index_reg, u32 index);
120static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
121 struct DCTStatStruc *pDCTstat);
122static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat, u8 dct,
123 u32 dev, u32 index_reg, u32 index);
124static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat,
125 struct DCTStatStruc *pDCTstat);
126static void mct_init(struct MCTStatStruc *pMCTstat,
127 struct DCTStatStruc *pDCTstat);
128static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
129 struct DCTStatStruc *pDCTstat);
130static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
131 struct DCTStatStruc *pDCTstatA);
132static void SetCSTriState(struct MCTStatStruc *pMCTstat,
133 struct DCTStatStruc *pDCTstat, u8 dct);
134static void SetODTTriState(struct MCTStatStruc *pMCTstat,
135 struct DCTStatStruc *pDCTstat, u8 dct);
136static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
137 struct DCTStatStruc *pDCTstat, u8 dct);
138static u32 mct_NodePresent_D(void);
139static void WaitRoutine_D(u32 time);
140static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
141 struct DCTStatStruc *pDCTstatA);
142static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
143 struct DCTStatStruc *pDCTstatA);
144static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
145 struct DCTStatStruc *pDCTstat);
146static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
147 struct DCTStatStruc *pDCTstat);
148void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
149 struct DCTStatStruc *pDCTstat);
150static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
151 struct DCTStatStruc *pDCTstat);
152void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
153 struct DCTStatStruc *pDCTstat);
154static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
155 struct DCTStatStruc *pDCTstatA);
156static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct);
157static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
158 struct DCTStatStruc *pDCTstat, u8 dct);
159
160
/*See mctAutoInitMCT header for index relationships to CL and T*/
/* Index k is the memclock-speed index (1 = 200 MHz ... 5 = 533 MHz);
 * index j is the CAS-latency index. Entry 0 is a placeholder. */
static const u16 Table_F_k[] = {00,200,266,333,400,533 };	/* F(k): memclock frequency in MHz */
static const u8 Table_T_k[] = {0x00,0x50,0x3D,0x30,0x25, 0x18 };	/* clock period per k; presumably 1/16 ns units (0x50/16 = 5.0 ns @ 200 MHz) -- TODO confirm */
static const u8 Table_CL2_j[] = {0x04,0x08,0x10,0x20,0x40, 0x80 };	/* SPD CAS-latency bit for CL(j); presumably matches SPD byte 18 bit positions -- TODO confirm */
static const u8 Tab_defTrc_k[] = {0x0,0x41,0x3C,0x3C,0x3A, 0x3A };	/* default Trc per k (consumed by Get_DefTrc_k_D) */
static const u16 Tab_40T_k[] = {00,200,150,120,100,75 };	/* 40 x clock period in ns per k (40 x 5 ns = 200 @ 200 MHz; consumed by Get_40Tk_D) */
static const u8 Tab_TrefT_k[] = {00,0,1,1,2,2,3,4,5,6,0,0};	/* Tref encoding per k -- TODO confirm register field semantics */
static const u8 Tab_BankAddr[] = {0x0,0x08,0x09,0x10,0x0C,0x0D,0x11,0x0E,0x15,0x16,0x0F,0x17};	/* bank-address-mapping register codes -- TODO confirm against BKDG F2x80 */
static const u8 Tab_tCL_j[] = {0,2,3,4,5};	/* CL(j): placeholder, then CL 2..5 */
static const u8 Tab_1KTfawT_k[] = {00,8,10,13,14,20};	/* Tfaw per k, presumably in MEMCLKs, 1KB page -- TODO confirm */
static const u8 Tab_2KTfawT_k[] = {00,10,14,17,18,24};	/* Tfaw per k, presumably in MEMCLKs, 2KB page -- TODO confirm */
static const u8 Tab_L1CLKDis[] = {8,8,6,4,2,0,8,8};	/* MemClkDis mapping tables; selection between L1/M2/S1 */
static const u8 Tab_M2CLKDis[] = {2,0,8,8,2,0,2,0};	/* presumably depends on package/platform type -- TODO confirm */
static const u8 Tab_S1CLKDis[] = {8,0,8,8,8,0,8,0};
static const u8 Table_Comp_Rise_Slew_20x[] = {7, 3, 2, 2, 0xFF};	/* phy-compensation slew-rate codes; 0xFF = table terminator */
static const u8 Table_Comp_Rise_Slew_15x[] = {7, 7, 3, 2, 0xFF};	/* presumably consumed by InitPhyCompensation -- TODO confirm */
static const u8 Table_Comp_Fall_Slew_20x[] = {7, 5, 3, 2, 0xFF};
static const u8 Table_Comp_Fall_Slew_15x[] = {7, 7, 5, 3, 0xFF};
179
void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstatA)
{
	/*
	 * Top-level entry: discover nodes, initialize each DCT, build the
	 * HT memory map, set cacheability, train DQS, and memory-clear.
	 *
	 * Memory may be mapped contiguously all the way up to 4GB (depending
	 * on setup options). It is the responsibility of PCI subsystem to
	 * create an uncacheable IO region below 4GB and to adjust TOP_MEM
	 * downward prior to any IO mapping or accesses. It is the same
	 * responsibility of the CPU sub-system prior to accessing LAPIC.
	 *
	 * Slot Number is an external convention, and is determined by OEM with
	 * accompanying silk screening. OEM may choose to use Slot number
	 * convention which is consistent with DIMM number conventions.
	 * All AMD engineering
	 * platforms do.
	 *
	 * Run-Time Requirements:
	 * 1. Complete Hypertransport Bus Configuration
	 * 2. SMBus Controller Initialized
	 * 3. Checksummed or Valid NVRAM bits
	 * 4. MCG_CTL=-1, MC4_CTL_EN=0 for all CPUs
	 * 5. MCi_STS from shutdown/warm reset recorded (if desired) prior to
	 *    entry
	 * 6. All var MTRRs reset to zero
	 * 7. State of NB_CFG.DisDatMsk set properly on all CPUs
	 * 8. All CPUs at 2Ghz Speed (unless DQS training is not installed).
	 * 9. All cHT links at max Speed/Width (unless DQS training is not
	 *    installed).
	 *
	 *
	 * Global relationship between index values and item values:
	 * j CL(j)       k   F(k)
	 * --------------------------
	 * 0 2.0         -    -
	 * 1 3.0         1   200 Mhz
	 * 2 4.0         2   266 Mhz
	 * 3 5.0         3   333 Mhz
	 * 4 6.0         4   400 Mhz
	 * 5 7.0         5   533 Mhz
	 */
	u8 Node, NodesWmem;
	u32 node_sys_base;

restartinit:	/* re-entered after a first-pass DIMM-spare reconfiguration */
	mctInitMemGPIOs_A_D();		/* Set any required GPIOs*/
	NodesWmem = 0;			/* count of nodes that ended up with usable memory */
	node_sys_base = 0;		/* running system-address base for the next node */
	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
		struct DCTStatStruc *pDCTstat;
		pDCTstat = pDCTstatA + Node;
		/* Seed per-node state: node id, PCI device handles, base. */
		pDCTstat->Node_ID = Node;
		pDCTstat->dev_host = PA_HOST(Node);
		pDCTstat->dev_map = PA_MAP(Node);
		pDCTstat->dev_dct = PA_DCT(Node);
		pDCTstat->dev_nbmisc = PA_NBMISC(Node);
		pDCTstat->NodeSysBase = node_sys_base;

		print_tx("mctAutoInitMCT_D: mct_init Node ", Node);
		mct_init(pMCTstat, pDCTstat);
		mctNodeIDDebugPort_D();
		pDCTstat->NodePresent = NodePresent_D(Node);
		if (pDCTstat->NodePresent) {		/* See if Node is there*/
			print_t("mctAutoInitMCT_D: clear_legacy_Mode\n");
			clear_legacy_Mode(pMCTstat, pDCTstat);
			pDCTstat->LogicalCPUID = mctGetLogicalCPUID_D(Node);

			print_t("mctAutoInitMCT_D: mct_InitialMCT_D\n");
			mct_InitialMCT_D(pMCTstat, pDCTstat);

			print_t("mctAutoInitMCT_D: mctSMBhub_Init\n");
			mctSMBhub_Init(Node);		/* Switch SMBUS crossbar to proper node*/

			print_t("mctAutoInitMCT_D: mct_initDCT\n");
			mct_initDCT(pMCTstat, pDCTstat);
			if (pDCTstat->ErrCode == SC_FatalErr) {
				goto fatalexit;		/* any fatal errors?*/
			} else if (pDCTstat->ErrCode < SC_StopError) {
				NodesWmem++;
			}
		}	/* if Node present */
		/* Advance the base for the next node; round the limit up to a
		 * 16-unit boundary before adding. */
		node_sys_base = pDCTstat->NodeSysBase;
		node_sys_base += (pDCTstat->NodeSysLimit + 2) & ~0x0F;
	}
	if (NodesWmem == 0) {
		print_debug("No Nodes?!\n");
		goto fatalexit;
	}

	print_t("mctAutoInitMCT_D: SyncDCTsReady_D\n");
	SyncDCTsReady_D(pMCTstat, pDCTstatA);	/* Make sure DCTs are ready for accesses.*/

	print_t("mctAutoInitMCT_D: HTMemMapInit_D\n");
	HTMemMapInit_D(pMCTstat, pDCTstatA);	/* Map local memory into system address space.*/
	mctHookAfterHTMap();

	print_t("mctAutoInitMCT_D: CPUMemTyping_D\n");
	CPUMemTyping_D(pMCTstat, pDCTstatA);	/* Map dram into WB/UC CPU cacheability */
	mctHookAfterCPU();			/* Setup external northbridge(s) */

	print_t("mctAutoInitMCT_D: DQSTiming_D\n");
	DQSTiming_D(pMCTstat, pDCTstatA);	/* Get Receiver Enable and DQS signal timing*/

	print_t("mctAutoInitMCT_D: UMAMemTyping_D\n");
	UMAMemTyping_D(pMCTstat, pDCTstatA);	/* Fix up for UMA sizing */

	print_t("mctAutoInitMCT_D: :OtherTiming\n");
	mct_OtherTiming(pMCTstat, pDCTstatA);

	/* First pass with DIMM sparing enabled requires a restart. */
	if (ReconfigureDIMMspare_D(pMCTstat, pDCTstatA)) { /* RESET# if 1st pass of DIMM spare enabled*/
		goto restartinit;
	}

	InterleaveNodes_D(pMCTstat, pDCTstatA);
	InterleaveChannels_D(pMCTstat, pDCTstatA);

	print_t("mctAutoInitMCT_D: ECCInit_D\n");
	if (ECCInit_D(pMCTstat, pDCTstatA)) {		/* Setup ECC control and ECC check-bits*/
		print_t("mctAutoInitMCT_D: MCTMemClr_D\n");
		MCTMemClr_D(pMCTstat,pDCTstatA);
	}

	mct_FinalMCT_D(pMCTstat, (pDCTstatA + 0) );	// Node 0
	print_t("All Done\n");
	return;

fatalexit:
	die("mct_d: fatalexit");
}
308
309
310static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
311 struct DCTStatStruc *pDCTstatA)
312{
313 u8 ret;
314
315 if (mctGet_NVbits(NV_CS_SpareCTL)) {
316 if (MCT_DIMM_SPARE_NO_WARM) {
317 /* Do no warm-reset DIMM spare */
318 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
319 LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA);
320 ret = 0;
321 } else {
322 mct_ResetDataStruct_D(pMCTstat, pDCTstatA);
323 pMCTstat->GStatus |= 1 << GSB_EnDIMMSpareNW;
324 ret = 1;
325 }
326 } else {
327 /* Do warm-reset DIMM spare */
328 if (mctGet_NVbits(NV_DQSTrainCTL))
329 mctWarmReset_D();
330 ret = 0;
331 }
332
333
334 } else {
335 ret = 0;
336 }
337
338 return ret;
339}
340
341
342static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
343 struct DCTStatStruc *pDCTstatA)
344{
345 u8 nv_DQSTrainCTL;
346
347 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
348 return;
349 }
350 nv_DQSTrainCTL = mctGet_NVbits(NV_DQSTrainCTL);
351 /* FIXME: BOZO- DQS training every time*/
352 nv_DQSTrainCTL = 1;
353
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000354 print_t("DQSTiming_D: mct_BeforeDQSTrain_D:\n");
355 mct_BeforeDQSTrain_D(pMCTstat, pDCTstatA);;
356 phyAssistedMemFnceTraining(pMCTstat, pDCTstatA);
357
Marc Jones8ae8c882007-12-19 01:32:08 +0000358 if (nv_DQSTrainCTL) {
Marc Jones8ae8c882007-12-19 01:32:08 +0000359 mctHookBeforeAnyTraining();
360
361 print_t("DQSTiming_D: TrainReceiverEn_D FirstPass:\n");
362 TrainReceiverEn_D(pMCTstat, pDCTstatA, FirstPass);
363
364 print_t("DQSTiming_D: mct_TrainDQSPos_D\n");
365 mct_TrainDQSPos_D(pMCTstat, pDCTstatA);
366
367 // Second Pass never used for Barcelona!
368 //print_t("DQSTiming_D: TrainReceiverEn_D SecondPass:\n");
369 //TrainReceiverEn_D(pMCTstat, pDCTstatA, SecondPass);
370
371 print_t("DQSTiming_D: mctSetEccDQSRcvrEn_D\n");
372 mctSetEccDQSRcvrEn_D(pMCTstat, pDCTstatA);
373
374 print_t("DQSTiming_D: TrainMaxReadLatency_D\n");
375//FIXME - currently uses calculated value TrainMaxReadLatency_D(pMCTstat, pDCTstatA);
376 mctHookAfterAnyTraining();
377 mctSaveDQSSigTmg_D();
378
379 print_t("DQSTiming_D: mct_EndDQSTraining_D\n");
380 mct_EndDQSTraining_D(pMCTstat, pDCTstatA);
381
382 print_t("DQSTiming_D: MCTMemClr_D\n");
383 MCTMemClr_D(pMCTstat, pDCTstatA);
384 } else {
385 mctGetDQSSigTmg_D(); /* get values into data structure */
386 LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA); /* load values into registers.*/
387 //mctDoWarmResetMemClr_D();
388 MCTMemClr_D(pMCTstat, pDCTstatA);
389 }
390}
391
392
static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstatA)
{
	/* Load previously captured DQS signal-timing values from the data
	 * structures back into the DCT registers for every node with
	 * mapped memory: receiver-enable delays, ECC DQS receiver enables,
	 * per-DIMM read/write data timing, and MaxRdLatency.
	 */
	u8 Node, Receiver, Channel, Dir, DIMM;
	u32 dev;
	u32 index_reg;
	u32 reg;
	u32 index;
	u32 val;


	for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
		struct DCTStatStruc *pDCTstat;
		pDCTstat = pDCTstatA + Node;

		if (pDCTstat->DCTSysLimit) {	/* node maps memory */
			dev = pDCTstat->dev_dct;
			for (Channel = 0;Channel < 2; Channel++) {
				/* there are four receiver pairs,
				   loosely associated with chipselects.*/
				index_reg = 0x98 + Channel * 0x100;	/* per-channel indirect index register */
				for (Receiver = 0; Receiver < 8; Receiver += 2) {
					/* Set Receiver Enable Values */
					mct_SetRcvrEnDly_D(pDCTstat,
						0, /* RcvrEnDly */
						1, /* FinalValue, From stack */
						Channel,
						Receiver,
						dev, index_reg,
						(Receiver >> 1) * 3 + 0x10, /* Addl_Index */
						2); /* Pass Second Pass ? */

				}
			}
			for (Channel = 0; Channel<2; Channel++) {
				SetEccDQSRcvrEn_D(pDCTstat, Channel);
			}

			for (Channel = 0; Channel < 2; Channel++) {
				u8 *p;
				index_reg = 0x98 + Channel * 0x100;

				/* NOTE:
				 * when 400, 533, 667, it will support dimm0/1/2/3,
				 * and set conf for dimm0, hw will copy to dimm1/2/3
				 * set for dimm1, hw will copy to dimm3
				 * Rev A/B only support DIMM0/1 when 800Mhz and above
				 *   + 0x100 to next dimm
				 * Rev C support DIMM0/1/2/3 when 800Mhz and above
				 *   + 0x100 to next dimm
				 */
				for (DIMM = 0; DIMM < 2; DIMM++) {
					if (DIMM==0) {
						index = 0;	/* CHA Write Data Timing Low */
					} else {
						if (pDCTstat->Speed >= 4) {	/* NOTE(review): 4 presumably means >= 800 MHz per the note above -- confirm */
							index = 0x100 * DIMM;
						} else {
							break;
						}
					}
					for (Dir=0;Dir<2;Dir++) {//RD/WR
						/* Write back the 9 stored timing bytes for this
						 * channel/DIMM/direction as two dwords + one byte. */
						p = pDCTstat->CH_D_DIR_B_DQS[Channel][DIMM][Dir];
						val = stream_to_int(p); /* CHA Read Data Timing High */
						Set_NB32_index_wait(dev, index_reg, index+1, val);
						val = stream_to_int(p+4); /* CHA Write Data Timing High */
						Set_NB32_index_wait(dev, index_reg, index+2, val);
						val = *(p+8); /* CHA Write ECC Timing */
						Set_NB32_index_wait(dev, index_reg, index+3, val);
						index += 4;
					}
				}
			}

			for (Channel = 0; Channel<2; Channel++) {
				reg = 0x78 + Channel * 0x100;
				val = Get_NB32(dev, reg);
				val &= ~(0x3ff<<22);	/* clear MaxRdLatency field [31:22] */
				val |= ((u32) pDCTstat->CH_MaxRdLat[Channel] << 22);
				val &= ~(1<<DqsRcvEnTrain);	/* leave training mode */
				Set_NB32(dev, reg, val);	/* program MaxRdLatency to correspond with current delay*/
			}
		}
	}
}
478
479
480static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
481 struct DCTStatStruc *pDCTstatA)
482{
483 /* Clear MC4_STS for all Nodes in the system. This is required in some
484 * circumstances to clear left over garbage from cold reset, shutdown,
485 * or normal ECC memory conditioning.
486 */
487
488 //FIXME: this function depends on pDCTstat Array ( with Node id ) - Is this really a problem?
489
490 u32 dev;
491 u8 Node;
492
493 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
494 struct DCTStatStruc *pDCTstat;
495 pDCTstat = pDCTstatA + Node;
496
497 if (pDCTstat->NodePresent) {
498 dev = pDCTstat->dev_nbmisc;
499 /*MCA NB Status Low (alias to MC4_STS[31:0] */
500 Set_NB32(dev, 0x48, 0);
501 /* MCA NB Status High (alias to MC4_STS[63:32] */
502 Set_NB32(dev, 0x4C, 0);
503 }
504 }
505}
506
507
508static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
509 struct DCTStatStruc *pDCTstatA)
510{
511 u8 Node;
512 u32 NextBase, BottomIO;
513 u8 _MemHoleRemap, DramHoleBase, DramHoleOffset;
514 u32 HoleSize, DramSelBaseAddr;
515
516 u32 val;
517 u32 base;
518 u32 limit;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000519 u32 dev, devx;
Marc Jones8ae8c882007-12-19 01:32:08 +0000520 struct DCTStatStruc *pDCTstat;
521
522 _MemHoleRemap = mctGet_NVbits(NV_MemHole);
523
524 if (pMCTstat->HoleBase == 0) {
525 DramHoleBase = mctGet_NVbits(NV_BottomIO);
526 } else {
527 DramHoleBase = pMCTstat->HoleBase >> (24-8);
528 }
529
530 BottomIO = DramHoleBase << (24-8);
531
532 NextBase = 0;
533 pDCTstat = pDCTstatA + 0;
534 dev = pDCTstat->dev_map;
535
536
537 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000538 pDCTstat = pDCTstatA + Node;
539 devx = pDCTstat->dev_map;
Marc Jones8ae8c882007-12-19 01:32:08 +0000540 DramSelBaseAddr = 0;
541 pDCTstat = pDCTstatA + Node;
542 if (!pDCTstat->GangedMode) {
543 DramSelBaseAddr = pDCTstat->NodeSysLimit - pDCTstat->DCTSysLimit;
544 /*In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
545 val = pDCTstat->NodeSysLimit;
546 if ((val & 0xFF) == 0xFE) {
547 DramSelBaseAddr++;
548 val++;
549 }
550 pDCTstat->DCTSysLimit = val;
551 }
552
553 base = pDCTstat->DCTSysBase;
554 limit = pDCTstat->DCTSysLimit;
555 if (limit > base) {
556 base += NextBase;
557 limit += NextBase;
558 DramSelBaseAddr += NextBase;
559 printk_debug(" Node: %02x base: %02x limit: %02x BottomIO: %02x\n", Node, base, limit, BottomIO);
560
561 if (_MemHoleRemap) {
562 if ((base < BottomIO) && (limit >= BottomIO)) {
563 /* HW Dram Remap */
564 pDCTstat->Status |= 1 << SB_HWHole;
565 pMCTstat->GStatus |= 1 << GSB_HWHole;
566 pDCTstat->DCTSysBase = base;
567 pDCTstat->DCTSysLimit = limit;
568 pDCTstat->DCTHoleBase = BottomIO;
569 pMCTstat->HoleBase = BottomIO;
570 HoleSize = _4GB_RJ8 - BottomIO; /* HoleSize[39:8] */
571 if ((DramSelBaseAddr > 0) && (DramSelBaseAddr < BottomIO))
572 base = DramSelBaseAddr;
573 val = ((base + HoleSize) >> (24-8)) & 0xFF;
574 DramHoleOffset = val;
575 val <<= 8; /* shl 16, rol 24 */
576 val |= DramHoleBase << 24;
577 val |= 1 << DramHoleValid;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000578 Set_NB32(devx, 0xF0, val); /* Dram Hole Address Reg */
Marc Jones8ae8c882007-12-19 01:32:08 +0000579 pDCTstat->DCTSysLimit += HoleSize;
580 base = pDCTstat->DCTSysBase;
581 limit = pDCTstat->DCTSysLimit;
582 } else if (base == BottomIO) {
583 /* SW Node Hoist */
584 pMCTstat->GStatus |= 1<<GSB_SpIntRemapHole;
585 pDCTstat->Status |= 1<<SB_SWNodeHole;
586 pMCTstat->GStatus |= 1<<GSB_SoftHole;
587 pMCTstat->HoleBase = base;
588 limit -= base;
589 base = _4GB_RJ8;
590 limit += base;
591 pDCTstat->DCTSysBase = base;
592 pDCTstat->DCTSysLimit = limit;
593 } else {
594 /* No Remapping. Normal Contiguous mapping */
595 pDCTstat->DCTSysBase = base;
596 pDCTstat->DCTSysLimit = limit;
597 }
598 } else {
599 /*No Remapping. Normal Contiguous mapping*/
600 pDCTstat->DCTSysBase = base;
601 pDCTstat->DCTSysLimit = limit;
602 }
603 base |= 3; /* set WE,RE fields*/
604 pMCTstat->SysLimit = limit;
605 }
606 Set_NB32(dev, 0x40 + (Node << 3), base); /* [Node] + Dram Base 0 */
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000607
608 /* if Node limit > 1GB then set it to 1GB boundary for each node */
609 if ((mctSetNodeBoundary_D()) && (limit > 0x00400000)) {
610 limit++;
611 limit &= 0xFFC00000;
612 limit--;
613 }
614 val = limit & 0xFFFF0000;
615 val |= Node;
Marc Jones8ae8c882007-12-19 01:32:08 +0000616 Set_NB32(dev, 0x44 + (Node << 3), val); /* set DstNode */
617
618 limit = pDCTstat->DCTSysLimit;
619 if (limit) {
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000620 NextBase = (limit & 0xFFFF0000) + 0x10000;
621 if ((mctSetNodeBoundary_D()) && (NextBase > 0x00400000)) {
622 NextBase++;
623 NextBase &= 0xFFC00000;
624 NextBase--;
625 }
Marc Jones8ae8c882007-12-19 01:32:08 +0000626 }
627 }
628
629 /* Copy dram map from Node 0 to Node 1-7 */
630 for (Node = 1; Node < MAX_NODES_SUPPORTED; Node++) {
Marc Jones8ae8c882007-12-19 01:32:08 +0000631 u32 reg;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000632 pDCTstat = pDCTstatA + Node;
633 devx = pDCTstat->dev_map;
Marc Jones8ae8c882007-12-19 01:32:08 +0000634
635 if (pDCTstat->NodePresent) {
636 printk_debug(" Copy dram map from Node 0 to Node %02x \n", Node);
637 reg = 0x40; /*Dram Base 0*/
638 do {
639 val = Get_NB32(dev, reg);
640 Set_NB32(devx, reg, val);
641 reg += 4;
642 } while ( reg < 0x80);
643 } else {
644 break; /* stop at first absent Node */
645 }
646 }
647
648 /*Copy dram map to F1x120/124*/
649 mct_HTMemMapExt(pMCTstat, pDCTstatA);
650}
651
652
653static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
654 struct DCTStatStruc *pDCTstatA)
655{
656
657 /* Initiates a memory clear operation for all node. The mem clr
658 * is done in paralel. After the memclr is complete, all processors
659 * status are checked to ensure that memclr has completed.
660 */
661 u8 Node;
662 struct DCTStatStruc *pDCTstat;
663
664 if (!mctGet_NVbits(NV_DQSTrainCTL)){
665 // FIXME: callback to wrapper: mctDoWarmResetMemClr_D
666 } else { // NV_DQSTrainCTL == 1
667 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
668 pDCTstat = pDCTstatA + Node;
669
670 if (pDCTstat->NodePresent) {
671 DCTMemClr_Init_D(pMCTstat, pDCTstat);
672 }
673 }
674 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
675 pDCTstat = pDCTstatA + Node;
676
677 if (pDCTstat->NodePresent) {
678 DCTMemClr_Sync_D(pMCTstat, pDCTstat);
679 }
680 }
681 }
682}
683
684
685static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
686 struct DCTStatStruc *pDCTstat)
687{
688 u32 val;
689 u32 dev;
690 u32 reg;
691
692 /* Initiates a memory clear operation on one node */
693 if (pDCTstat->DCTSysLimit) {
694 dev = pDCTstat->dev_dct;
695 reg = 0x110;
696
697 do {
698 val = Get_NB32(dev, reg);
699 } while (val & (1 << MemClrBusy));
700
701 val |= (1 << MemClrInit);
702 Set_NB32(dev, reg, val);
703
704 }
705}
706
707
708static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
709 struct DCTStatStruc *pDCTstatA)
710{
711 /* Ensures that memory clear has completed on all node.*/
712 u8 Node;
713 struct DCTStatStruc *pDCTstat;
714
715 if (!mctGet_NVbits(NV_DQSTrainCTL)){
716 // callback to wrapper: mctDoWarmResetMemClr_D
717 } else { // NV_DQSTrainCTL == 1
718 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
719 pDCTstat = pDCTstatA + Node;
720
721 if (pDCTstat->NodePresent) {
722 DCTMemClr_Sync_D(pMCTstat, pDCTstat);
723 }
724 }
725 }
726}
727
728
static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstat)
{
	u32 val;
	u32 dev = pDCTstat->dev_dct;
	u32 reg;

	/* Ensure that a memory clear operation has completed on one node */
	if (pDCTstat->DCTSysLimit){
		reg = 0x110;

		/* First wait for the engine to go idle... */
		do {
			val = Get_NB32(dev, reg);
		} while (val & (1 << MemClrBusy));

		/* ...then for the completion status bit to assert. */
		do {
			val = Get_NB32(dev, reg);
		} while (!(val & (1 << Dr_MemClrStatus)));
	}

	/* NOTE(review): this write executes even when DCTSysLimit == 0
	 * (node maps no memory) -- confirm that is intended. */
	val = 0x0FE40FC0;		// BKDG recommended
	/* NOTE(review): OR'd in directly, unlike the (1 << ...) bits above
	 * -- confirm MCCH_FlushWrOnStpGnt is a mask, not a bit index. */
	val |= MCCH_FlushWrOnStpGnt;	// Set for S3
	Set_NB32(dev, 0x11C, val);
}
753
754
755static u8 NodePresent_D(u8 Node)
756{
757 /*
758 * Determine if a single Hammer Node exists within the network.
759 */
760
761 u32 dev;
762 u32 val;
763 u32 dword;
764 u8 ret = 0;
765
766 dev = PA_HOST(Node); /*test device/vendor id at host bridge */
767 val = Get_NB32(dev, 0);
768 dword = mct_NodePresent_D(); /* FIXME: BOZO -11001022h rev for F */
769 if (val == dword) { /* AMD Hammer Family CPU HT Configuration */
770 if (oemNodePresent_D(Node, &ret))
771 goto finish;
772 /* Node ID register */
773 val = Get_NB32(dev, 0x60);
774 val &= 0x07;
775 dword = Node;
776 if (val == dword) /* current nodeID = requested nodeID ? */
777 ret = 1;
778finish:
779 ;
780 }
781
782 return ret;
783}
784
785
786static void DCTInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct)
787{
788 /*
789 * Initialize DRAM on single Athlon 64/Opteron Node.
790 */
791
792 u8 stopDCTflag;
793 u32 val;
794
795 ClearDCT_D(pMCTstat, pDCTstat, dct);
796 stopDCTflag = 1; /*preload flag with 'disable' */
797 if (mct_DIMMPresence(pMCTstat, pDCTstat, dct) < SC_StopError) {
798 print_t("\t\tDCTInit_D: mct_DIMMPresence Done\n");
799 if (mct_SPDCalcWidth(pMCTstat, pDCTstat, dct) < SC_StopError) {
800 print_t("\t\tDCTInit_D: mct_SPDCalcWidth Done\n");
801 if (AutoCycTiming_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
802 print_t("\t\tDCTInit_D: AutoCycTiming_D Done\n");
803 if (AutoConfig_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
804 print_t("\t\tDCTInit_D: AutoConfig_D Done\n");
805 if (PlatformSpec_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
806 print_t("\t\tDCTInit_D: PlatformSpec_D Done\n");
807 stopDCTflag = 0;
808 if (!(pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW))) {
809 print_t("\t\tDCTInit_D: StartupDCT_D\n");
810 StartupDCT_D(pMCTstat, pDCTstat, dct); /*yeaahhh! */
811 }
812 }
813 }
814 }
815 }
816 }
817 if (stopDCTflag) {
818 u32 reg_off = dct * 0x100;
819 val = 1<<DisDramInterface;
820 Set_NB32(pDCTstat->dev_dct, reg_off+0x94, val);
821 /*To maximize power savings when DisDramInterface=1b,
822 all of the MemClkDis bits should also be set.*/
823 val = 0xFF000000;
824 Set_NB32(pDCTstat->dev_dct, reg_off+0x88, val);
825 }
826}
827
828
829static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
830 struct DCTStatStruc *pDCTstatA)
831{
832 /* Wait (and block further access to dram) for all DCTs to be ready,
833 * by polling all InitDram bits and waiting for possible memory clear
834 * operations to be complete. Read MemClkFreqVal bit to see if
835 * the DIMMs are present in this node.
836 */
837
838 u8 Node;
839
840 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
841 struct DCTStatStruc *pDCTstat;
842 pDCTstat = pDCTstatA + Node;
843 mct_SyncDCTsReady(pDCTstat);
844 }
845}
846
847
static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstat, u8 dct)
{
	/* Read MemClkFreqVal bit to see if the DIMMs are present in this node.
	 * If the DIMMs are present then set the DRAM Enable bit for this node.
	 *
	 * Setting dram init starts up the DCT state machine, initializes the
	 * dram devices with MRS commands, and kicks off any
	 * HW memory clear process that the chip is capable of. The sooner
	 * that dram init is set for all nodes, the faster the memory system
	 * initialization can complete. Thus, the init loop is unrolled into
	 * two loops so as to start the processeses for non BSP nodes sooner.
	 * This procedure will not wait for the process to finish.
	 * Synchronization is handled elsewhere.
	 */

	u32 val;
	u32 dev;
	u8 byte;
	u32 reg;
	u32 reg_off = dct * 0x100;	/* DCT1 registers live 0x100 above DCT0's */

	dev = pDCTstat->dev_dct;
	val = Get_NB32(dev, 0x94 + reg_off);
	if (val & (1<<MemClkFreqVal)) {	/* DIMMs present on this DCT */
		print_t("\t\t\tStartupDCT_D: MemClkFreqVal\n");
		byte = mctGet_NVbits(NV_DQSTrainCTL);
		if (byte == 1) {
			/* Enable DQSRcvEn training mode */
			print_t("\t\t\tStartupDCT_D: DqsRcvEnTrain set \n");
			reg = 0x78 + reg_off;
			val = Get_NB32(dev, reg);
			/* Setting this bit forces a 1T window with hard left
			 * pass/fail edge and a probabalistic right pass/fail
			 * edge. LEFT edge is referenced for final
			 * receiver enable position.*/
			val |= 1 << DqsRcvEnTrain;
			Set_NB32(dev, reg, val);
		}
		mctHookBeforeDramInit();	/* generalized Hook */
		print_t("\t\t\tStartupDCT_D: DramInit \n");
		mct_DramInit(pMCTstat, pDCTstat, dct);
		AfterDramInit_D(pDCTstat, dct);
		mctHookAfterDramInit();		/* generalized Hook*/
	}
}
894
895
896static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
897 struct DCTStatStruc *pDCTstat, u8 dct)
898{
899 u32 reg_end;
900 u32 dev = pDCTstat->dev_dct;
901 u32 reg = 0x40 + 0x100 * dct;
902 u32 val = 0;
903
904 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
905 reg_end = 0x78 + 0x100 * dct;
906 } else {
907 reg_end = 0xA4 + 0x100 * dct;
908 }
909
910 while(reg < reg_end) {
911 Set_NB32(dev, reg, val);
912 reg += 4;
913 }
914
915 val = 0;
916 dev = pDCTstat->dev_map;
917 reg = 0xF0;
918 Set_NB32(dev, reg, val);
919}
920
921
922static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
923 struct DCTStatStruc *pDCTstat, u8 dct)
924{
925 /* Initialize DCT Timing registers as per DIMM SPD.
926 * For primary timing (T, CL) use best case T value.
927 * For secondary timing params., use most aggressive settings
928 * of slowest DIMM.
929 *
930 * There are three components to determining "maximum frequency":
931 * SPD component, Bus load component, and "Preset" max frequency
932 * component.
933 *
934 * The SPD component is a function of the min cycle time specified
935 * by each DIMM, and the interaction of cycle times from all DIMMs
936 * in conjunction with CAS latency. The SPD component only applies
937 * when user timing mode is 'Auto'.
938 *
939 * The Bus load component is a limiting factor determined by electrical
940 * characteristics on the bus as a result of varying number of device
941 * loads. The Bus load component is specific to each platform but may
942 * also be a function of other factors. The bus load component only
943 * applies when user timing mode is 'Auto'.
944 *
945 * The Preset component is subdivided into three items and is the
946 * minimum of the set: Silicon revision, user limit setting when user
947 * timing mode is 'Auto' and memclock mode is 'Limit', OEM build
948 * specification of the maximum frequency. The Preset component is only
949 * applies when user timing mode is 'Auto'.
950 */
951
952 u8 i;
953 u8 Twr, Trtp;
954 u8 Trp, Trrd, Trcd, Tras, Trc, Trfc[4], Rows;
955 u32 DramTimingLo, DramTimingHi;
956 u16 Tk10, Tk40;
957 u8 Twtr;
958 u8 LDIMM;
959 u8 DDR2_1066;
960 u8 byte;
961 u32 dword;
962 u32 dev;
963 u32 reg;
964 u32 reg_off;
965 u32 val;
966 u16 smbaddr;
967
968 /* Get primary timing (CAS Latency and Cycle Time) */
969 if (pDCTstat->Speed == 0) {
970 mctGet_MaxLoadFreq(pDCTstat);
971
972 /* and Factor in presets (setup options, Si cap, etc.) */
973 GetPresetmaxF_D(pMCTstat, pDCTstat);
974
975 /* Go get best T and CL as specified by DIMM mfgs. and OEM */
976 SPDGetTCL_D(pMCTstat, pDCTstat, dct);
977 /* skip callback mctForce800to1067_D */
978 pDCTstat->Speed = pDCTstat->DIMMAutoSpeed;
979 pDCTstat->CASL = pDCTstat->DIMMCASL;
980
981 /* if "manual" memclock mode */
982 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 2)
983 pDCTstat->Speed = mctGet_NVbits(NV_MemCkVal) + 1;
984
985 mct_AfterGetCLT(pMCTstat, pDCTstat, dct);
986 }
987
988 /* Gather all DIMM mini-max values for cycle timing data */
989 Rows = 0;
990 Trp = 0;
991 Trrd = 0;
992 Trcd = 0;
993 Trtp = 0;
994 Tras = 0;
995 Trc = 0;
996 Twr = 0;
997 Twtr = 0;
998 for (i=0; i < 4; i++)
999 Trfc[i] = 0;
1000
1001 for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
1002 LDIMM = i >> 1;
1003 if (pDCTstat->DIMMValid & (1 << i)) {
1004 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
1005 byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
1006 if (Rows < byte)
1007 Rows = byte; /* keep track of largest row sz */
1008
1009 byte = mctRead_SPD(smbaddr, SPD_TRP);
1010 if (Trp < byte)
1011 Trp = byte;
1012
1013 byte = mctRead_SPD(smbaddr, SPD_TRRD);
1014 if (Trrd < byte)
1015 Trrd = byte;
1016
1017 byte = mctRead_SPD(smbaddr, SPD_TRCD);
1018 if (Trcd < byte)
1019 Trcd = byte;
1020
1021 byte = mctRead_SPD(smbaddr, SPD_TRTP);
1022 if (Trtp < byte)
1023 Trtp = byte;
1024
1025 byte = mctRead_SPD(smbaddr, SPD_TWR);
1026 if (Twr < byte)
1027 Twr = byte;
1028
1029 byte = mctRead_SPD(smbaddr, SPD_TWTR);
1030 if (Twtr < byte)
1031 Twtr = byte;
1032
1033 val = mctRead_SPD(smbaddr, SPD_TRC);
1034 if ((val == 0) || (val == 0xFF)) {
1035 pDCTstat->ErrStatus |= 1<<SB_NoTrcTrfc;
1036 pDCTstat->ErrCode = SC_VarianceErr;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00001037 val = Get_DefTrc_k_D(pDCTstat->Speed);
Marc Jones8ae8c882007-12-19 01:32:08 +00001038 } else {
1039 byte = mctRead_SPD(smbaddr, SPD_TRCRFC);
1040 if (byte & 0xF0) {
1041 val++; /* round up in case fractional extention is non-zero.*/
1042 }
1043 }
1044 if (Trc < val)
1045 Trc = val;
1046
1047 /* dev density=rank size/#devs per rank */
1048 byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
1049
1050 val = ((byte >> 5) | (byte << 3)) & 0xFF;
1051 val <<= 2;
1052
1053 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE; /* dev density=2^(rows+columns+banks) */
1054 if (byte == 4) {
1055 val >>= 4;
1056 } else if (byte == 8) {
1057 val >>= 3;
1058 } else if (byte == 16) {
1059 val >>= 2;
1060 }
1061
1062 byte = bsr(val);
1063
1064 if (Trfc[LDIMM] < byte)
1065 Trfc[LDIMM] = byte;
1066
1067 byte = mctRead_SPD(smbaddr, SPD_TRAS);
1068 if (Tras < byte)
1069 Tras = byte;
1070 } /* Dimm Present */
1071 }
1072
1073 /* Convert DRAM CycleTiming values and store into DCT structure */
1074 DDR2_1066 = 0;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00001075 byte = pDCTstat->Speed;
Marc Jones8ae8c882007-12-19 01:32:08 +00001076 if (byte == 5)
1077 DDR2_1066 = 1;
1078 Tk40 = Get_40Tk_D(byte);
1079 Tk10 = Tk40>>2;
1080
1081 /* Notes:
1082 1. All secondary time values given in SPDs are in binary with units of ns.
1083 2. Some time values are scaled by four, in order to have least count of 0.25 ns
1084 (more accuracy). JEDEC SPD spec. shows which ones are x1 and x4.
1085 3. Internally to this SW, cycle time, Tk, is scaled by 10 to affect a
1086 least count of 0.1 ns (more accuracy).
1087 4. SPD values not scaled are multiplied by 10 and then divided by 10T to find
1088 equivalent minimum number of bus clocks (a remainder causes round-up of clocks).
1089 5. SPD values that are prescaled by 4 are multiplied by 10 and then divided by 40T to find
1090 equivalent minimum number of bus clocks (a remainder causes round-up of clocks).*/
1091
1092 /* Tras */
1093 dword = Tras * 40;
1094 pDCTstat->DIMMTras = (u16)dword;
1095 val = dword / Tk40;
1096 if (dword % Tk40) { /* round up number of busclocks */
1097 val++;
1098 }
1099 if (DDR2_1066) {
1100 if (val < Min_TrasT_1066)
1101 val = Min_TrasT_1066;
1102 else if (val > Max_TrasT_1066)
1103 val = Max_TrasT_1066;
1104 } else {
1105 if (val < Min_TrasT)
1106 val = Min_TrasT;
1107 else if (val > Max_TrasT)
1108 val = Max_TrasT;
1109 }
1110 pDCTstat->Tras = val;
1111
1112 /* Trp */
1113 dword = Trp * 10;
1114 pDCTstat->DIMMTrp = dword;
1115 val = dword / Tk40;
1116 if (dword % Tk40) { /* round up number of busclocks */
1117 val++;
1118 }
1119 if (DDR2_1066) {
1120 if (val < Min_TrasT_1066)
1121 val = Min_TrpT_1066;
1122 else if (val > Max_TrpT_1066)
1123 val = Max_TrpT_1066;
1124 } else {
1125 if (val < Min_TrpT)
1126 val = Min_TrpT;
1127 else if (val > Max_TrpT)
1128 val = Max_TrpT;
1129 }
1130 pDCTstat->Trp = val;
1131
1132 /*Trrd*/
1133 dword = Trrd * 10;
1134 pDCTstat->DIMMTrrd = dword;
1135 val = dword / Tk40;
1136 if (dword % Tk40) { /* round up number of busclocks */
1137 val++;
1138 }
1139 if (DDR2_1066) {
1140 if (val < Min_TrrdT_1066)
1141 val = Min_TrrdT_1066;
1142 else if (val > Max_TrrdT_1066)
1143 val = Max_TrrdT_1066;
1144 } else {
1145 if (val < Min_TrrdT)
1146 val = Min_TrrdT;
1147 else if (val > Max_TrrdT)
1148 val = Max_TrrdT;
1149 }
1150 pDCTstat->Trrd = val;
1151
1152 /* Trcd */
1153 dword = Trcd * 10;
1154 pDCTstat->DIMMTrcd = dword;
1155 val = dword / Tk40;
1156 if (dword % Tk40) { /* round up number of busclocks */
1157 val++;
1158 }
1159 if (DDR2_1066) {
1160 if (val < Min_TrcdT_1066)
1161 val = Min_TrcdT_1066;
1162 else if (val > Max_TrcdT_1066)
1163 val = Max_TrcdT_1066;
1164 } else {
1165 if (val < Min_TrcdT)
1166 val = Min_TrcdT;
1167 else if (val > Max_TrcdT)
1168 val = Max_TrcdT;
1169 }
1170 pDCTstat->Trcd = val;
1171
1172 /* Trc */
1173 dword = Trc * 40;
1174 pDCTstat->DIMMTrc = dword;
1175 val = dword / Tk40;
1176 if (dword % Tk40) { /* round up number of busclocks */
1177 val++;
1178 }
1179 if (DDR2_1066) {
1180 if (val < Min_TrcT_1066)
1181 val = Min_TrcT_1066;
1182 else if (val > Max_TrcT_1066)
1183 val = Max_TrcT_1066;
1184 } else {
1185 if (val < Min_TrcT)
1186 val = Min_TrcT;
1187 else if (val > Max_TrcT)
1188 val = Max_TrcT;
1189 }
1190 pDCTstat->Trc = val;
1191
1192 /* Trtp */
1193 dword = Trtp * 10;
1194 pDCTstat->DIMMTrtp = dword;
1195 val = pDCTstat->Speed;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00001196 if (val <= 2) { /* 7.75ns / Speed in ns to get clock # */
1197 val = 2; /* for DDR400/DDR533 */
1198 } else { /* Note a speed of 3 will be a Trtp of 3 */
1199 val = 3; /* for DDR667/DDR800/DDR1066 */
Marc Jones8ae8c882007-12-19 01:32:08 +00001200 }
1201 pDCTstat->Trtp = val;
1202
1203 /* Twr */
1204 dword = Twr * 10;
1205 pDCTstat->DIMMTwr = dword;
1206 val = dword / Tk40;
1207 if (dword % Tk40) { /* round up number of busclocks */
1208 val++;
1209 }
1210 if (DDR2_1066) {
1211 if (val < Min_TwrT_1066)
1212 val = Min_TwrT_1066;
1213 else if (val > Max_TwrT_1066)
1214 val = Max_TwrT_1066;
1215 } else {
1216 if (val < Min_TwrT)
1217 val = Min_TwrT;
1218 else if (val > Max_TwrT)
1219 val = Max_TwrT;
1220 }
1221 pDCTstat->Twr = val;
1222
1223 /* Twtr */
1224 dword = Twtr * 10;
1225 pDCTstat->DIMMTwtr = dword;
1226 val = dword / Tk40;
1227 if (dword % Tk40) { /* round up number of busclocks */
1228 val++;
1229 }
1230 if (DDR2_1066) {
1231 if (val < Min_TwrT_1066)
1232 val = Min_TwtrT_1066;
1233 else if (val > Max_TwtrT_1066)
1234 val = Max_TwtrT_1066;
1235 } else {
1236 if (val < Min_TwtrT)
1237 val = Min_TwtrT;
1238 else if (val > Max_TwtrT)
1239 val = Max_TwtrT;
1240 }
1241 pDCTstat->Twtr = val;
1242
1243
1244 /* Trfc0-Trfc3 */
1245 for (i=0; i<4; i++)
1246 pDCTstat->Trfc[i] = Trfc[i];
1247
1248 mctAdjustAutoCycTmg_D();
1249
1250 /* Program DRAM Timing values */
1251 DramTimingLo = 0; /* Dram Timing Low init */
1252 val = pDCTstat->CASL;
1253 val = Tab_tCL_j[val];
1254 DramTimingLo |= val;
1255
1256 val = pDCTstat->Trcd;
1257 if (DDR2_1066)
1258 val -= Bias_TrcdT_1066;
1259 else
1260 val -= Bias_TrcdT;
1261
1262 DramTimingLo |= val<<4;
1263
1264 val = pDCTstat->Trp;
1265 if (DDR2_1066)
1266 val -= Bias_TrpT_1066;
1267 else {
1268 val -= Bias_TrpT;
1269 val <<= 1;
1270 }
1271 DramTimingLo |= val<<7;
1272
1273 val = pDCTstat->Trtp;
1274 val -= Bias_TrtpT;
1275 DramTimingLo |= val<<11;
1276
1277 val = pDCTstat->Tras;
1278 if (DDR2_1066)
1279 val -= Bias_TrasT_1066;
1280 else
1281 val -= Bias_TrasT;
1282 DramTimingLo |= val<<12;
1283
1284 val = pDCTstat->Trc;
1285 val -= Bias_TrcT;
1286 DramTimingLo |= val<<16;
1287
1288 if (!DDR2_1066) {
1289 val = pDCTstat->Twr;
1290 val -= Bias_TwrT;
1291 DramTimingLo |= val<<20;
1292 }
1293
1294 val = pDCTstat->Trrd;
1295 if (DDR2_1066)
1296 val -= Bias_TrrdT_1066;
1297 else
1298 val -= Bias_TrrdT;
1299 DramTimingLo |= val<<22;
1300
1301
1302 DramTimingHi = 0; /* Dram Timing Low init */
1303 val = pDCTstat->Twtr;
1304 if (DDR2_1066)
1305 val -= Bias_TwtrT_1066;
1306 else
1307 val -= Bias_TwtrT;
1308 DramTimingHi |= val<<8;
1309
1310 val = 2;
1311 DramTimingHi |= val<<16;
1312
1313 val = 0;
1314 for (i=4;i>0;i--) {
1315 val <<= 3;
1316 val |= Trfc[i-1];
1317 }
1318 DramTimingHi |= val << 20;
1319
1320
1321 dev = pDCTstat->dev_dct;
1322 reg_off = 0x100 * dct;
1323 print_tx("AutoCycTiming: DramTimingLo ", DramTimingLo);
1324 print_tx("AutoCycTiming: DramTimingHi ", DramTimingHi);
1325
1326 Set_NB32(dev, 0x88 + reg_off, DramTimingLo); /*DCT Timing Low*/
1327 DramTimingHi |=0x0000FC77;
1328 Set_NB32(dev, 0x8c + reg_off, DramTimingHi); /*DCT Timing Hi*/
1329
1330 if (DDR2_1066) {
1331 /* Twr */
1332 dword = pDCTstat->Twr;
1333 dword -= Bias_TwrT_1066;
1334 dword <<= 4;
1335 reg = 0x84 + reg_off;
1336 val = Get_NB32(dev, reg);
1337 val &= 0x8F;
1338 val |= dword;
1339 Set_NB32(dev, reg, val);
1340 }
1341// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1342
1343 print_tx("AutoCycTiming: Status ", pDCTstat->Status);
1344 print_tx("AutoCycTiming: ErrStatus ", pDCTstat->ErrStatus);
1345 print_tx("AutoCycTiming: ErrCode ", pDCTstat->ErrCode);
1346 print_t("AutoCycTiming: Done\n");
1347
1348 mctHookAfterAutoCycTmg();
1349
1350 return pDCTstat->ErrCode;
1351}
1352
1353
1354static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
1355 struct DCTStatStruc *pDCTstat)
1356{
1357 /* Get max frequency from OEM platform definition, from any user
1358 * override (limiting) of max frequency, and from any Si Revision
1359 * Specific information. Return the least of these three in
1360 * DCTStatStruc.PresetmaxFreq.
1361 */
1362
1363 u16 proposedFreq;
1364 u16 word;
1365
1366 /* Get CPU Si Revision defined limit (NPT) */
1367 proposedFreq = 533; /* Rev F0 programmable max memclock is */
1368
1369 /*Get User defined limit if "limit" mode */
1370 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
1371 word = Get_Fk_D(mctGet_NVbits(NV_MemCkVal) + 1);
1372 if (word < proposedFreq)
1373 proposedFreq = word;
1374
1375 /* Get Platform defined limit */
1376 word = mctGet_NVbits(NV_MAX_MEMCLK);
1377 if (word < proposedFreq)
1378 proposedFreq = word;
1379
1380 word = pDCTstat->PresetmaxFreq;
1381 if (word > proposedFreq)
1382 word = proposedFreq;
1383
1384 pDCTstat->PresetmaxFreq = word;
1385 }
1386}
1387
1388
1389
1390static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
1391 struct DCTStatStruc *pDCTstat, u8 dct)
1392{
1393 /* Find the best T and CL primary timing parameter pair, per Mfg.,
1394 * for the given set of DIMMs, and store into DCTStatStruc
1395 * (.DIMMAutoSpeed and .DIMMCASL). See "Global relationship between
1396 * index values and item values" for definition of CAS latency
1397 * index (j) and Frequency index (k).
1398 */
1399 int i, j, k;
1400 u8 T1min, CL1min;
1401
1402 /* i={0..7} (std. physical DIMM number)
1403 * j is an integer which enumerates increasing CAS latency.
1404 * k is an integer which enumerates decreasing cycle time.
1405 * CL no. {0,1,2} corresponds to CL X, CL X-.5, or CL X-1 (per individual DIMM)
1406 * Max timing values are per parameter, of all DIMMs, spec'd in ns like the SPD.
1407 */
1408
1409 CL1min = 0xFF;
1410 T1min = 0xFF;
1411 for (k=K_MAX; k >= K_MIN; k--) {
1412 for (j = J_MIN; j <= J_MAX; j++) {
1413 if (Sys_Capability_D(pMCTstat, pDCTstat, j, k) ) {
1414 /* 1. check to see if DIMMi is populated.
1415 2. check if DIMMi supports CLj and Tjk */
1416 for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
1417 if (pDCTstat->DIMMValid & (1 << i)) {
1418 if (Dimm_Supports_D(pDCTstat, i, j, k))
1419 break;
1420 }
1421 } /* while ++i */
1422 if (i == MAX_DIMMS_SUPPORTED) {
1423 T1min = k;
1424 CL1min = j;
1425 goto got_TCL;
1426 }
1427 }
1428 } /* while ++j */
1429 } /* while --k */
1430
1431got_TCL:
1432 if (T1min != 0xFF) {
1433 pDCTstat->DIMMCASL = CL1min; /*mfg. optimized */
1434 pDCTstat->DIMMAutoSpeed = T1min;
1435 print_tx("SPDGetTCL_D: DIMMCASL ", pDCTstat->DIMMCASL);
1436 print_tx("SPDGetTCL_D: DIMMAutoSpeed ", pDCTstat->DIMMAutoSpeed);
1437
1438 } else {
1439 pDCTstat->DIMMCASL = CL_DEF; /* failsafe values (running in min. mode) */
1440 pDCTstat->DIMMAutoSpeed = T_DEF;
1441 pDCTstat->ErrStatus |= 1 << SB_DimmMismatchT;
1442 pDCTstat->ErrStatus |= 1 << SB_MinimumMode;
1443 pDCTstat->ErrCode = SC_VarianceErr;
1444 }
1445 print_tx("SPDGetTCL_D: Status ", pDCTstat->Status);
1446 print_tx("SPDGetTCL_D: ErrStatus ", pDCTstat->ErrStatus);
1447 print_tx("SPDGetTCL_D: ErrCode ", pDCTstat->ErrCode);
1448 print_t("SPDGetTCL_D: Done\n");
1449}
1450
1451
1452static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
1453 struct DCTStatStruc *pDCTstat, u8 dct)
1454{
1455 u32 dev;
1456 u32 reg;
1457 u32 val;
1458
1459 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, dct);
1460
1461 if (pDCTstat->GangedMode) {
1462 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, 1);
1463 }
1464
1465 if ( pDCTstat->_2Tmode == 2) {
1466 dev = pDCTstat->dev_dct;
1467 reg = 0x94 + 0x100 * dct; /* Dram Configuration Hi */
1468 val = Get_NB32(dev, reg);
1469 val |= 1 << 20; /* 2T CMD mode */
1470 Set_NB32(dev, reg, val);
1471 }
1472
1473 mct_PlatformSpec(pMCTstat, pDCTstat, dct);
1474 InitPhyCompensation(pMCTstat, pDCTstat, dct);
1475 mctHookAfterPSCfg();
1476 return pDCTstat->ErrCode;
1477}
1478
1479
/* Build and program the DRAM Control, Timing Low (clock-disable bits),
 * Config Lo/Hi, and Config Misc/Misc2 registers for one DCT, based on the
 * DIMM population established by SPDSetBanks_D/StitchMemory_D.
 * Returns pDCTstat->ErrCode (SC_StopError aborts early).
 */
static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstat, u8 dct)
{
	u32 DramControl, DramTimingLo, Status;
	u32 DramConfigLo, DramConfigHi, DramConfigMisc, DramConfigMisc2;
	u32 val;
	u32 reg_off;
	u32 dev;
	u16 word;
	u32 dword;
	u8 byte;

	print_tx("AutoConfig_D: DCT: ", dct);

	DramConfigLo = 0;
	DramConfigHi = 0;
	DramConfigMisc = 0;
	DramConfigMisc2 = 0;

	/* set bank addressing and Masks, plus CS pops */
	SPDSetBanks_D(pMCTstat, pDCTstat, dct);
	if (pDCTstat->ErrCode == SC_StopError)
		goto AutoConfig_exit;

	/* map chip-selects into local address space */
	StitchMemory_D(pMCTstat, pDCTstat, dct);
	InterleaveBanks_D(pMCTstat, pDCTstat, dct);

	/* temp image of status (for convenience). RO usage! */
	Status = pDCTstat->Status;

	dev = pDCTstat->dev_dct;
	reg_off = 0x100 * dct;


	/* Build Dram Control Register Value */
	DramConfigMisc2 = Get_NB32 (dev, 0xA8 + reg_off); /* Dram Config Misc 2 */
	DramControl = Get_NB32 (dev, 0x78 + reg_off); /* Dram Control*/

	if (mctGet_NVbits(NV_CLKHZAltVidC3))
		DramControl |= 1<<16;

	// FIXME: Add support(skip) for Ax and Cx versions
	DramControl |= 5;	/* RdPtrInit */


	/* Build Dram Config Lo Register Value */
	DramConfigLo |= 1 << 4;					/* 75 Ohms ODT */
	if (mctGet_NVbits(NV_MAX_DIMMS) == 8) {
		if (pDCTstat->Speed == 3) {
			if ((pDCTstat->MAdimms[dct] == 4))
				DramConfigLo |= 1 << 5;		/* 50 Ohms ODT */
		} else if (pDCTstat->Speed == 4){
			if ((pDCTstat->MAdimms[dct] != 1))
				DramConfigLo |= 1 << 5;		/* 50 Ohms ODT */
		}
	} else {
		// FIXME: Skip for Ax versions
		if ((pDCTstat->MAdimms[dct] == 4)) {
			if ( pDCTstat->DimmQRPresent != 0) {
				if ((pDCTstat->Speed == 3) || (pDCTstat->Speed == 4)) {
					DramConfigLo |= 1 << 5;	/* 50 Ohms ODT */
				}
			/* NOTE(review): this else-if re-tests MAdimms == 4 (already
			 * true here) and then requires DimmQRPresent != 0 inside a
			 * branch only reached when it is 0 — the inner body looks
			 * unreachable. Matches the historical code; confirm against
			 * the BKDG ODT tables before changing. */
			} else if ((pDCTstat->MAdimms[dct] == 4)) {
				if (pDCTstat->Speed == 4) {
					if ( pDCTstat->DimmQRPresent != 0) {
						DramConfigLo |= 1 << 5;	/* 50 Ohms ODT */
					}
				}
			}
		} else if ((pDCTstat->MAdimms[dct] == 2)) {
			DramConfigLo |= 1 << 5;		/* 50 Ohms ODT */
		}

	}

	// FIXME: Skip for Ax versions
	/* callback not required - if (!mctParityControl_D()) */
	if (Status & (1 << SB_PARDIMMs)) {
		DramConfigLo |= 1 << ParEn;
		DramConfigMisc2 |= 1 << ActiveCmdAtRst;
	} else {
		DramConfigLo &= ~(1 << ParEn);
		DramConfigMisc2 &= ~(1 << ActiveCmdAtRst);
	}

	if (mctGet_NVbits(NV_BurstLen32)) {
		if (!pDCTstat->GangedMode)
			DramConfigLo |= 1 << BurstLength32;
	}

	if (Status & (1 << SB_128bitmode))
		DramConfigLo |= 1 << Width128;	/* 128-bit mode (normal) */

	/* Set X4Dimm[3:0] for each x4 DIMM on this channel; word steps by 2
	 * because DIMMs alternate between channels A/B in the bitmap. */
	word = dct;
	dword = X4Dimm;
	while (word < 8) {
		if (pDCTstat->Dimmx4Present & (1 << word))
			DramConfigLo |= 1 << dword;	/* X4Dimm[3:0] */
		word++;
		word++;
		dword++;
	}

	if (!(Status & (1 << SB_Registered)))
		DramConfigLo |= 1 << UnBuffDimm;	/* Unbuffered DIMMs */

	/* Enable DIMM ECC only when the CPU is capable, all DIMMs have ECC,
	 * and the user has not disabled it. */
	if (mctGet_NVbits(NV_ECC_CAP))
		if (Status & (1 << SB_ECCDIMMs))
			if ( mctGet_NVbits(NV_ECC))
				DramConfigLo |= 1 << DimmEcEn;



	/* Build Dram Config Hi Register Value */
	dword = pDCTstat->Speed;
	DramConfigHi |= dword - 1;	/* get MemClk encoding */
	DramConfigHi |= 1 << MemClkFreqVal;

	/* NOTE(review): condition requires BOTH x4 and x8 DIMMs present while
	 * the comment says "only if x8 Registered" — looks suspicious but is
	 * preserved as-is; verify against the BKDG RDqsEn definition. */
	if (Status & (1 << SB_Registered))
		if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0))
			/* set only if x8 Registered DIMMs in System*/
			DramConfigHi |= 1 << RDqsEn;

	if (mctGet_NVbits(NV_CKE_PDEN)) {
		DramConfigHi |= 1 << 15;		/* PowerDownEn */
		if (mctGet_NVbits(NV_CKE_CTL))
			/*Chip Select control of CKE*/
			DramConfigHi |= 1 << 16;
	}

	/* Control Bank Swizzle */
	if (0) /* call back not needed mctBankSwizzleControl_D()) */
		DramConfigHi &= ~(1 << BankSwizzleMode);
	else
		DramConfigHi |= 1 << BankSwizzleMode; /* recommended setting (default) */

	/* Check for Quadrank DIMM presence */
	if ( pDCTstat->DimmQRPresent != 0) {
		byte = mctGet_NVbits(NV_4RANKType);
		if (byte == 2)
			DramConfigHi |= 1 << 17;	/* S4 (4-Rank SO-DIMMs) */
		else if (byte == 1)
			DramConfigHi |= 1 << 18;	/* R4 (4-Rank Registered DIMMs) */
	}

	if (0) /* call back not needed mctOverrideDcqBypMax_D ) */
		val = mctGet_NVbits(NV_BYPMAX);
	else
		val = 0x0f; // recommended setting (default)
	DramConfigHi |= val << 24;

	/* Select the 1K- or 2K-page Tfaw table; in ganged mode only the
	 * DIMMs belonging to this DCT are considered. */
	val = pDCTstat->DIMM2Kpage;
	if (pDCTstat->GangedMode != 0) {
		if (dct != 0) {
			val &= 0x55;
		} else {
			val &= 0xAA;
		}
	}
	if (val)
		val = Tab_2KTfawT_k[pDCTstat->Speed];
	else
		val = Tab_1KTfawT_k[pDCTstat->Speed];

	if (pDCTstat->Speed == 5)
		val >>= 1;

	val -= Bias_TfawT;
	val <<= 28;
	DramConfigHi |= val;	/* Tfaw for 1K or 2K paged drams */

	// FIXME: Skip for Ax versions
	DramConfigHi |= 1 << DcqArbBypassEn;


	/* Build MemClkDis Value from Dram Timing Lo and
	   Dram Config Misc Registers
	 1. We will assume that MemClkDis field has been preset prior to this
	    point.
	 2. We will only set MemClkDis bits if a DIMM is NOT present AND if:
	    NV_AllMemClks <>0 AND SB_DiagClks ==0 */


	/* Dram Timing Low (owns Clock Enable bits) */
	DramTimingLo = Get_NB32(dev, 0x88 + reg_off);
	if (mctGet_NVbits(NV_AllMemClks) == 0) {
		/* Special Jedec SPD diagnostic bit - "enable all clocks" */
		if (!(pDCTstat->Status & (1<<SB_DiagClks))) {
			const u8 *p;
			/* Package-type specific DIMM-to-clock-pin map */
			byte = mctGet_NVbits(NV_PACK_TYPE);
			if (byte == PT_L1)
				p = Tab_L1CLKDis;
			else if (byte == PT_M2)
				p = Tab_M2CLKDis;
			else
				p = Tab_S1CLKDis;

			dword = 0;
			while(dword < MAX_DIMMS_SUPPORTED) {
				val = p[dword];
				print_tx("DramTimingLo: val=", val);
				if (!(pDCTstat->DIMMValid & (1<<val)))
					/*disable memclk*/
					DramTimingLo |= 1<<(dword+24);
				dword++ ;
			}
		}
	}

	print_tx("AutoConfig_D: DramControl: ", DramControl);
	print_tx("AutoConfig_D: DramTimingLo: ", DramTimingLo);
	print_tx("AutoConfig_D: DramConfigMisc: ", DramConfigMisc);
	print_tx("AutoConfig_D: DramConfigMisc2: ", DramConfigMisc2);
	print_tx("AutoConfig_D: DramConfigLo: ", DramConfigLo);
	print_tx("AutoConfig_D: DramConfigHi: ", DramConfigHi);

	/* Write Values to the registers */
	Set_NB32(dev, 0x78 + reg_off, DramControl);
	Set_NB32(dev, 0x88 + reg_off, DramTimingLo);
	Set_NB32(dev, 0xA0 + reg_off, DramConfigMisc);
	Set_NB32(dev, 0xA8 + reg_off, DramConfigMisc2);
	Set_NB32(dev, 0x90 + reg_off, DramConfigLo);
	mct_SetDramConfigHi_D(pDCTstat, dct, DramConfigHi);
	mct_ForceAutoPrecharge_D(pDCTstat, dct);
	mct_EarlyArbEn_D(pMCTstat, pDCTstat);
	mctHookAfterAutoCfg();

//	dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));

	print_tx("AutoConfig: Status ", pDCTstat->Status);
	print_tx("AutoConfig: ErrStatus ", pDCTstat->ErrStatus);
	print_tx("AutoConfig: ErrCode ", pDCTstat->ErrCode);
	print_t("AutoConfig: Done\n");
AutoConfig_exit:
	return pDCTstat->ErrCode;
}
1717
1718
static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstat, u8 dct)
{
	/* Set bank addressing, program Mask values and build a chip-select
	 * population map. This routine programs PCI 0:24N:2x80 config register
	 * and PCI 0:24N:2x60,64,68,6C config registers (CS Mask 0-3).
	 * Sets ErrCode = SC_StopError when no chip-select survives.
	 */

	u8 ChipSel, Rows, Cols, Ranks ,Banks, DevWidth;
	u32 BankAddrReg, csMask;

	u32 val;
	u32 reg;
	u32 dev;
	u32 reg_off;
	u8 byte;
	u16 word;
	u32 dword;
	u16 smbaddr;

	dev = pDCTstat->dev_dct;
	reg_off = 0x100 * dct;

	BankAddrReg = 0;
	/* One DIMM owns a pair of chip-selects, hence the step of 2. */
	for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel+=2) {
		byte = ChipSel;
		/* In 64-bit muxed mode the upper chip-selects map to a
		 * shifted DIMM index. */
		if ((pDCTstat->Status & (1 << SB_64MuxedMode)) && ChipSel >=4)
			byte -= 3;

		if (pDCTstat->DIMMValid & (1<<byte)) {
			smbaddr = Get_DIMMAddress_D(pDCTstat, (ChipSel + dct));

			byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
			Rows = byte & 0x1f;

			byte = mctRead_SPD(smbaddr, SPD_COLSZ);
			Cols = byte & 0x1f;

			Banks = mctRead_SPD(smbaddr, SPD_LBANKS);

			/* NOTE(review): DevWidth is read but not used below;
			 * kept for parity with the original code. */
			byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH);
			DevWidth = byte & 0x7f; /* bits 0-6 = bank 0 width */

			byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
			Ranks = (byte & 7) + 1;

			/* Configure Bank encoding
			 * Use a 6-bit key into a lookup table.
			 * Key (index) = CCCBRR, where CCC is the number of
			 * Columns minus 9,RR is the number of Rows minus 13,
			 * and B is the number of banks minus 2.
			 * See "6-bit Bank Addressing Table" at the end of
			 * this file.*/
			byte = Cols - 9;	/* 9 Cols is smallest dev size */
			byte <<= 3;	/* make room for row and bank bits*/
			if (Banks == 8)
				byte |= 4;

			/* 13 Rows is smallest dev size */
			byte |= Rows - 13;	/* CCCBRR internal encode */

			/* Find the encoding in the 12-entry lookup table;
			 * no match means an unsupported geometry and the
			 * chip-select is silently skipped. */
			for (dword=0; dword < 12; dword++) {
				if (byte == Tab_BankAddr[dword])
					break;
			}

			if (dword < 12) {

				/* bit no. of CS field in address mapping reg.*/
				dword <<= (ChipSel<<1);
				BankAddrReg |= dword;

				/* Mask value=(2pow(rows+cols+banks+3)-1)>>8,
				   or 2pow(rows+cols+banks-5)-1*/
				csMask = 0;

				byte = Rows + Cols;		/* cl=rows+cols*/
				if (Banks == 8)
					byte -= 2;		/* 3 banks - 5 */
				else
					byte -= 3;		/* 2 banks - 5 */
				/* mask size (64-bit rank only) */

				if (pDCTstat->Status & (1 << SB_128bitmode))
					byte++;	/* double mask size if in 128-bit mode*/

				csMask |= 1 << byte;
				csMask--;

				/*set ChipSelect population indicator even bits*/
				pDCTstat->CSPresent |= (1<<ChipSel);
				if (Ranks >= 2)
					/*set ChipSelect population indicator odd bits*/
					pDCTstat->CSPresent |= 1 << (ChipSel + 1);

				reg = 0x60+(ChipSel<<1) + reg_off;	/*Dram CS Mask Register */
				val = csMask;
				val &= 0x1FF83FE0;	/* Mask out reserved bits.*/
				Set_NB32(dev, reg, val);
			}
		} else {
			/* DIMM absent: remember a chip-select whose SPD read
			 * failed checksum so it can be forced off later. */
			if (pDCTstat->DIMMSPDCSE & (1<<ChipSel))
				pDCTstat->CSTestFail |= (1<<ChipSel);
		}	/* if DIMMValid*/
	}	/* while ChipSel*/

	SetCSTriState(pMCTstat, pDCTstat, dct);
	/* SetCKETriState */
	SetODTTriState(pMCTstat, pDCTstat, dct);

	/* In 128-bit mode DCT1 shadows DCT0, so tri-state it too. */
	if (pDCTstat->Status & (1 << SB_128bitmode)) {
		SetCSTriState(pMCTstat, pDCTstat, 1); /* force dct1) */
		SetODTTriState(pMCTstat, pDCTstat, 1); /* force dct1) */
	}
	/* Any chip-select removed by the platform exclude map is treated
	 * as a test failure so ODT still covers it. */
	word = pDCTstat->CSPresent;
	mctGetCS_ExcludeMap();		/* mask out specified chip-selects */
	word ^= pDCTstat->CSPresent;
	pDCTstat->CSTestFail |= word;	/* enable ODT to disabled DIMMs */
	if (!pDCTstat->CSPresent)
		pDCTstat->ErrCode = SC_StopError;

	reg = 0x80 + reg_off;		/* Bank Addressing Register */
	Set_NB32(dev, reg, BankAddrReg);

//	dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));

	print_tx("SPDSetBanks: Status ", pDCTstat->Status);
	print_tx("SPDSetBanks: ErrStatus ", pDCTstat->ErrStatus);
	print_tx("SPDSetBanks: ErrCode ", pDCTstat->ErrCode);
	print_t("SPDSetBanks: Done\n");
}
1850
1851
1852static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
1853 struct DCTStatStruc *pDCTstat)
1854{
1855 /* Per SPDs, check the symmetry of DIMM pairs (DIMM on Channel A
1856 * matching with DIMM on Channel B), the overall DIMM population,
1857 * and determine the width mode: 64-bit, 64-bit muxed, 128-bit.
1858 */
1859
1860 u8 i;
1861 u8 smbaddr, smbaddr1;
1862 u8 byte, byte1;
1863
1864 /* Check Symmetry of Channel A and Channel B DIMMs
1865 (must be matched for 128-bit mode).*/
1866 for (i=0; i < MAX_DIMMS_SUPPORTED; i += 2) {
1867 if ((pDCTstat->DIMMValid & (1 << i)) && (pDCTstat->DIMMValid & (1<<(i+1)))) {
1868 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
1869 smbaddr1 = Get_DIMMAddress_D(pDCTstat, i+1);
1870
1871 byte = mctRead_SPD(smbaddr, SPD_ROWSZ) & 0x1f;
1872 byte1 = mctRead_SPD(smbaddr1, SPD_ROWSZ) & 0x1f;
1873 if (byte != byte1) {
1874 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1875 break;
1876 }
1877
1878 byte = mctRead_SPD(smbaddr, SPD_COLSZ) & 0x1f;
1879 byte1 = mctRead_SPD(smbaddr1, SPD_COLSZ) & 0x1f;
1880 if (byte != byte1) {
1881 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1882 break;
1883 }
1884
1885 byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
1886 byte1 = mctRead_SPD(smbaddr1, SPD_BANKSZ);
1887 if (byte != byte1) {
1888 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1889 break;
1890 }
1891
1892 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0x7f;
1893 byte1 = mctRead_SPD(smbaddr1, SPD_DEVWIDTH) & 0x7f;
1894 if (byte != byte1) {
1895 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1896 break;
1897 }
1898
1899 byte = mctRead_SPD(smbaddr, SPD_DMBANKS) & 7; /* #ranks-1 */
1900 byte1 = mctRead_SPD(smbaddr1, SPD_DMBANKS) & 7; /* #ranks-1 */
1901 if (byte != byte1) {
1902 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1903 break;
1904 }
1905
1906 }
1907 }
1908
1909}
1910
1911
1912static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
1913 struct DCTStatStruc *pDCTstat, u8 dct)
1914{
1915 /* Requires that Mask values for each bank be programmed first and that
1916 * the chip-select population indicator is correctly set.
1917 */
1918
1919 u8 b = 0;
1920 u32 nxtcsBase, curcsBase;
1921 u8 p, q;
1922 u32 Sizeq, BiggestBank;
1923 u8 _DSpareEn;
1924
1925 u16 word;
1926 u32 dev;
1927 u32 reg;
1928 u32 reg_off;
1929 u32 val;
1930
1931
1932 dev = pDCTstat->dev_dct;
1933 reg_off = 0x100 * dct;
1934
1935 _DSpareEn = 0;
1936
1937 /* CS Sparing 1=enabled, 0=disabled */
1938 if (mctGet_NVbits(NV_CS_SpareCTL) & 1) {
1939 if (MCT_DIMM_SPARE_NO_WARM) {
1940 /* Do no warm-reset DIMM spare */
1941 if (pMCTstat->GStatus & 1 << GSB_EnDIMMSpareNW) {
1942 word = pDCTstat->CSPresent;
1943 val = bsf(word);
1944 word &= ~(1<<val);
1945 if (word)
1946 /* Make sure at least two chip-selects are available */
1947 _DSpareEn = 1;
1948 else
1949 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1950 }
1951 } else {
1952 if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1=enabled, 0=disabled */
1953 word = pDCTstat->CSPresent;
1954 val = bsf(word);
1955 word &= ~(1 << val);
1956 if (word)
1957 /* Make sure at least two chip-selects are available */
1958 _DSpareEn = 1;
1959 else
1960 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1961 }
1962 }
1963 }
1964
1965 nxtcsBase = 0; /* Next available cs base ADDR[39:8] */
1966 for (p=0; p < MAX_DIMMS_SUPPORTED; p++) {
1967 BiggestBank = 0;
1968 for (q = 0; q < MAX_CS_SUPPORTED; q++) { /* from DIMMS to CS */
1969 if (pDCTstat->CSPresent & (1 << q)) { /* bank present? */
1970 reg = 0x40 + (q << 2) + reg_off; /* Base[q] reg.*/
1971 val = Get_NB32(dev, reg);
1972 if (!(val & 3)) { /* (CSEnable|Spare==1)bank is enabled already? */
1973 reg = 0x60 + (q << 1) + reg_off; /*Mask[q] reg.*/
1974 val = Get_NB32(dev, reg);
1975 val >>= 19;
1976 val++;
1977 val <<= 19;
1978 Sizeq = val; //never used
1979 if (val > BiggestBank) {
1980 /*Bingo! possibly Map this chip-select next! */
1981 BiggestBank = val;
1982 b = q;
1983 }
1984 }
1985 } /*if bank present */
1986 } /* while q */
1987 if (BiggestBank !=0) {
1988 curcsBase = nxtcsBase; /* curcsBase=nxtcsBase*/
1989 /* DRAM CS Base b Address Register offset */
1990 reg = 0x40 + (b << 2) + reg_off;
1991 if (_DSpareEn) {
1992 BiggestBank = 0;
1993 val = 1 << Spare; /* Spare Enable*/
1994 } else {
1995 val = curcsBase;
1996 val |= 1 << CSEnable; /* Bank Enable */
1997 }
1998 Set_NB32(dev, reg, val);
1999 if (_DSpareEn)
2000 _DSpareEn = 0;
2001 else
2002 /* let nxtcsBase+=Size[b] */
2003 nxtcsBase += BiggestBank;
2004 }
2005
2006 /* bank present but disabled?*/
2007 if ( pDCTstat->CSTestFail & (1 << p)) {
2008 /* DRAM CS Base b Address Register offset */
2009 reg = (p << 2) + 0x40 + reg_off;
2010 val = 1 << TestFail;
2011 Set_NB32(dev, reg, val);
2012 }
2013 }
2014
2015 if (nxtcsBase) {
2016 pDCTstat->DCTSysLimit = nxtcsBase - 1;
2017 mct_AfterStitchMemory(pMCTstat, pDCTstat, dct);
2018 }
2019
2020// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
2021
2022 print_tx("StitchMemory: Status ", pDCTstat->Status);
2023 print_tx("StitchMemory: ErrStatus ", pDCTstat->ErrStatus);
2024 print_tx("StitchMemory: ErrCode ", pDCTstat->ErrCode);
2025 print_t("StitchMemory: Done\n");
2026}
2027
2028
2029static u8 Get_Tk_D(u8 k)
2030{
2031 return Table_T_k[k];
2032}
2033
2034
2035static u8 Get_CLj_D(u8 j)
2036{
2037 return Table_CL2_j[j];
2038}
2039
2040static u8 Get_DefTrc_k_D(u8 k)
2041{
2042 return Tab_defTrc_k[k];
2043}
2044
2045
2046static u16 Get_40Tk_D(u8 k)
2047{
2048 return Tab_40T_k[k]; /* FIXME: k or k<<1 ?*/
2049}
2050
2051
2052static u16 Get_Fk_D(u8 k)
2053{
2054 return Table_F_k[k]; /* FIXME: k or k<<1 ? */
2055}
2056
2057
static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat,
				u8 i, u8 j, u8 k)
{
	/* Check whether DIMM i supports CAS-latency index j at frequency
	 * index k, by decoding the DIMM's SPD CAS-latency byte and the
	 * matching SPD cycle-time byte (CL X / X-1 / X-2).
	 *
	 * Return value has INVERTED sense: 0 = DIMM is capable of (j, k),
	 * 1 = DIMM is not capable (or SPD reported no cycle time).
	 */
	u8 Tk, CLj, CL_i;
	u8 ret = 0;

	u32 DIMMi;
	u8 byte;
	u16 word, wordx;

	/* SMBus address of DIMM i */
	DIMMi = Get_DIMMAddress_D(pDCTstat, i);

	CLj = Get_CLj_D(j);

	/* check if DIMMi supports CLj */
	CL_i = mctRead_SPD(DIMMi, SPD_CASLAT);
	byte = CL_i & CLj;
	if (byte) {
		/*find out if its CL X, CLX-1, or CLX-2 */
		word = bsr(byte);	/* bit position of CLj */
		wordx = bsr(CL_i);	/* bit position of CLX of CLi */
		wordx -= word;		/* CL number (CL no. = 0,1, 2, or 3) */
		wordx <<= 3;		/* 8 bits per SPD byte index */
		/*get T from SPD byte 9, 23, 25 (packed in EncodedTSPD) */
		word = (EncodedTSPD >> wordx) & 0xFF;
		Tk = Get_Tk_D(k);
		byte = mctRead_SPD(DIMMi, word);	/* DIMMi speed */
		if (Tk < byte) {
			/* proposed cycle time is faster than this DIMM */
			ret = 1;
		} else if (byte == 0){
			/* SPD has no cycle time for this CL - flag and reject */
			pDCTstat->ErrStatus |= 1<<SB_NoCycTime;
			ret = 1;
		} else {
			ret = 0; /* DIMM is capable! */
		}
	} else {
		/* DIMM does not advertise CLj at all */
		ret = 1;
	}
	return ret;
}
2098
2099
2100static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
2101 struct DCTStatStruc *pDCTstat)
2102{
2103 /* Check DIMMs present, verify checksum, flag SDRAM type,
2104 * build population indicator bitmaps, and preload bus loading
2105 * of DIMMs into DCTStatStruc.
2106 * MAAload=number of devices on the "A" bus.
2107 * MABload=number of devices on the "B" bus.
2108 * MAAdimms=number of DIMMs on the "A" bus slots.
2109 * MABdimms=number of DIMMs on the "B" bus slots.
2110 * DATAAload=number of ranks on the "A" bus slots.
2111 * DATABload=number of ranks on the "B" bus slots.
2112 */
2113
2114 u16 i, j;
2115 u8 smbaddr, Index;
2116 u16 Checksum;
2117 u8 SPDCtrl;
2118 u16 RegDIMMPresent, MaxDimms;
2119 u8 devwidth;
2120 u16 DimmSlots;
2121 u8 byte = 0, bytex;
2122 u16 word;
2123
2124 /* preload data structure with addrs */
2125 mctGet_DIMMAddr(pDCTstat, pDCTstat->Node_ID);
2126
2127 DimmSlots = MaxDimms = mctGet_NVbits(NV_MAX_DIMMS);
2128
2129 SPDCtrl = mctGet_NVbits(NV_SPDCHK_RESTRT);
2130
2131 RegDIMMPresent = 0;
2132 pDCTstat->DimmQRPresent = 0;
2133
2134 for (i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
2135 if (i >= MaxDimms)
2136 break;
2137
2138 if ((pDCTstat->DimmQRPresent & (1 << i)) || (i < DimmSlots)) {
2139 print_tx("\t DIMMPresence: i=", i);
2140 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
2141 print_tx("\t DIMMPresence: smbaddr=", smbaddr);
2142 if (smbaddr) {
2143 Checksum = 0;
2144 for (Index=0; Index < 64; Index++){
2145 int status;
2146 status = mctRead_SPD(smbaddr, Index);
2147 if (status < 0)
2148 break;
2149 byte = status & 0xFF;
2150 if (Index < 63)
2151 Checksum += byte;
2152 }
2153
2154 if (Index == 64) {
2155 pDCTstat->DIMMPresent |= 1 << i;
2156 if ((Checksum & 0xFF) == byte) {
2157 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2158 if (byte == JED_DDR2SDRAM) {
2159 /*Dimm is 'Present'*/
2160 pDCTstat->DIMMValid |= 1 << i;
2161 }
2162 } else {
2163 pDCTstat->DIMMSPDCSE = 1 << i;
2164 if (SPDCtrl == 0) {
2165 pDCTstat->ErrStatus |= 1 << SB_DIMMChkSum;
2166 pDCTstat->ErrCode = SC_StopError;
2167 } else {
2168 /*if NV_SPDCHK_RESTRT is set to 1, ignore faulty SPD checksum*/
2169 pDCTstat->ErrStatus |= 1<<SB_DIMMChkSum;
2170 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2171 if (byte == JED_DDR2SDRAM)
2172 pDCTstat->DIMMValid |= 1 << i;
2173 }
2174 }
2175 /* Check module type */
2176 byte = mctRead_SPD(smbaddr, SPD_DIMMTYPE);
2177 if (byte & JED_REGADCMSK)
2178 RegDIMMPresent |= 1 << i;
2179 /* Check ECC capable */
2180 byte = mctRead_SPD(smbaddr, SPD_EDCTYPE);
2181 if (byte & JED_ECC) {
2182 /* DIMM is ECC capable */
2183 pDCTstat->DimmECCPresent |= 1 << i;
2184 }
2185 if (byte & JED_ADRCPAR) {
2186 /* DIMM is ECC capable */
2187 pDCTstat->DimmPARPresent |= 1 << i;
2188 }
2189 /* Check if x4 device */
2190 devwidth = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE;
2191 if (devwidth == 4) {
2192 /* DIMM is made with x4 or x16 drams */
2193 pDCTstat->Dimmx4Present |= 1 << i;
2194 } else if (devwidth == 8) {
2195 pDCTstat->Dimmx8Present |= 1 << i;
2196 } else if (devwidth == 16) {
2197 pDCTstat->Dimmx16Present |= 1 << i;
2198 }
2199 /* check page size */
2200 byte = mctRead_SPD(smbaddr, SPD_COLSZ);
2201 byte &= 0x0F;
2202 word = 1 << byte;
2203 word >>= 3;
2204 word *= devwidth; /* (((2^COLBITS) / 8) * ORG) / 2048 */
2205 word >>= 11;
2206 if (word)
2207 pDCTstat->DIMM2Kpage |= 1 << i;
2208
2209 /*Check if SPD diag bit 'analysis probe installed' is set */
2210 byte = mctRead_SPD(smbaddr, SPD_ATTRIB);
2211 if ( byte & JED_PROBEMSK )
2212 pDCTstat->Status |= 1<<SB_DiagClks;
2213
2214 byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
2215 if (!(byte & (1<< SPDPLBit)))
2216 pDCTstat->DimmPlPresent |= 1 << i;
2217 byte &= 7;
2218 byte++; /* ranks */
2219 if (byte > 2) {
2220 /* if any DIMMs are QR, we have to make two passes through DIMMs*/
2221 if ( pDCTstat->DimmQRPresent == 0) {
2222 MaxDimms <<= 1;
2223 }
2224 if (i < DimmSlots) {
2225 pDCTstat->DimmQRPresent |= (1 << i) | (1 << (i+4));
2226 }
2227 byte = 2; /* upper two ranks of QR DIMM will be counted on another DIMM number iteration*/
2228 } else if (byte == 2) {
2229 pDCTstat->DimmDRPresent |= 1 << i;
2230 }
2231 bytex = devwidth;
2232 if (devwidth == 16)
2233 bytex = 4;
2234 else if (devwidth == 4)
2235 bytex=16;
2236
2237 if (byte == 2)
2238 bytex <<= 1; /*double Addr bus load value for dual rank DIMMs*/
2239
2240 j = i & (1<<0);
2241 pDCTstat->DATAload[j] += byte; /*number of ranks on DATA bus*/
2242 pDCTstat->MAload[j] += bytex; /*number of devices on CMD/ADDR bus*/
2243 pDCTstat->MAdimms[j]++; /*number of DIMMs on A bus */
2244 /*check for DRAM package Year <= 06*/
2245 byte = mctRead_SPD(smbaddr, SPD_MANDATEYR);
2246 if (byte < MYEAR06) {
2247 /*Year < 06 and hence Week < 24 of 06 */
2248 pDCTstat->DimmYr06 |= 1 << i;
2249 pDCTstat->DimmWk2406 |= 1 << i;
2250 } else if (byte == MYEAR06) {
2251 /*Year = 06, check if Week <= 24 */
2252 pDCTstat->DimmYr06 |= 1 << i;
2253 byte = mctRead_SPD(smbaddr, SPD_MANDATEWK);
2254 if (byte <= MWEEK24)
2255 pDCTstat->DimmWk2406 |= 1 << i;
2256 }
2257 }
2258 }
2259 }
2260 }
2261 print_tx("\t DIMMPresence: DIMMValid=", pDCTstat->DIMMValid);
2262 print_tx("\t DIMMPresence: DIMMPresent=", pDCTstat->DIMMPresent);
2263 print_tx("\t DIMMPresence: RegDIMMPresent=", RegDIMMPresent);
2264 print_tx("\t DIMMPresence: DimmECCPresent=", pDCTstat->DimmECCPresent);
2265 print_tx("\t DIMMPresence: DimmPARPresent=", pDCTstat->DimmPARPresent);
2266 print_tx("\t DIMMPresence: Dimmx4Present=", pDCTstat->Dimmx4Present);
2267 print_tx("\t DIMMPresence: Dimmx8Present=", pDCTstat->Dimmx8Present);
2268 print_tx("\t DIMMPresence: Dimmx16Present=", pDCTstat->Dimmx16Present);
2269 print_tx("\t DIMMPresence: DimmPlPresent=", pDCTstat->DimmPlPresent);
2270 print_tx("\t DIMMPresence: DimmDRPresent=", pDCTstat->DimmDRPresent);
2271 print_tx("\t DIMMPresence: DimmQRPresent=", pDCTstat->DimmQRPresent);
2272 print_tx("\t DIMMPresence: DATAload[0]=", pDCTstat->DATAload[0]);
2273 print_tx("\t DIMMPresence: MAload[0]=", pDCTstat->MAload[0]);
2274 print_tx("\t DIMMPresence: MAdimms[0]=", pDCTstat->MAdimms[0]);
2275 print_tx("\t DIMMPresence: DATAload[1]=", pDCTstat->DATAload[1]);
2276 print_tx("\t DIMMPresence: MAload[1]=", pDCTstat->MAload[1]);
2277 print_tx("\t DIMMPresence: MAdimms[1]=", pDCTstat->MAdimms[1]);
2278
2279 if (pDCTstat->DIMMValid != 0) { /* If any DIMMs are present...*/
2280 if (RegDIMMPresent != 0) {
2281 if ((RegDIMMPresent ^ pDCTstat->DIMMValid) !=0) {
2282 /* module type DIMM mismatch (reg'ed, unbuffered) */
2283 pDCTstat->ErrStatus |= 1<<SB_DimmMismatchM;
2284 pDCTstat->ErrCode = SC_StopError;
2285 } else{
2286 /* all DIMMs are registered */
2287 pDCTstat->Status |= 1<<SB_Registered;
2288 }
2289 }
2290 if (pDCTstat->DimmECCPresent != 0) {
2291 if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid )== 0) {
2292 /* all DIMMs are ECC capable */
2293 pDCTstat->Status |= 1<<SB_ECCDIMMs;
2294 }
2295 }
2296 if (pDCTstat->DimmPARPresent != 0) {
2297 if ((pDCTstat->DimmPARPresent ^ pDCTstat->DIMMValid) == 0) {
2298 /*all DIMMs are Parity capable */
2299 pDCTstat->Status |= 1<<SB_PARDIMMs;
2300 }
2301 }
2302 } else {
2303 /* no DIMMs present or no DIMMs that qualified. */
2304 pDCTstat->ErrStatus |= 1<<SB_NoDimms;
2305 pDCTstat->ErrCode = SC_StopError;
2306 }
2307
2308 print_tx("\t DIMMPresence: Status ", pDCTstat->Status);
2309 print_tx("\t DIMMPresence: ErrStatus ", pDCTstat->ErrStatus);
2310 print_tx("\t DIMMPresence: ErrCode ", pDCTstat->ErrCode);
2311 print_t("\t DIMMPresence: Done\n");
2312
2313 mctHookAfterDIMMpre();
2314
2315 return pDCTstat->ErrCode;
2316}
2317
2318
2319static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
2320 struct DCTStatStruc *pDCTstat, int j, int k)
2321{
2322 /* Determine if system is capable of operating at given input
2323 * parameters for CL, and T. There are three components to
2324 * determining "maximum frequency" in AUTO mode: SPD component,
2325 * Bus load component, and "Preset" max frequency component.
2326 * This procedure is used to help find the SPD component and relies
2327 * on pre-determination of the bus load component and the Preset
2328 * components. The generalized algorithm for finding maximum
2329 * frequency is structured this way so as to optimize for CAS
2330 * latency (which might get better as a result of reduced frequency).
2331 * See "Global relationship between index values and item values"
2332 * for definition of CAS latency index (j) and Frequency index (k).
2333 */
2334 u8 freqOK, ClOK;
2335 u8 ret = 0;
2336
2337 if (Get_Fk_D(k) > pDCTstat->PresetmaxFreq)
2338 freqOK = 0;
2339 else
2340 freqOK = 1;
2341
2342 /* compare proposed CAS latency with AMD Si capabilities */
2343 if ((j < J_MIN) || (j > J_MAX))
2344 ClOK = 0;
2345 else
2346 ClOK = 1;
2347
2348 if (freqOK && ClOK)
2349 ret = 1;
2350
2351 return ret;
2352}
2353
2354
2355static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i)
2356{
2357 u8 *p;
2358
2359 p = pDCTstat->DIMMAddr;
2360 //mct_BeforeGetDIMMAddress();
2361 return p[i];
2362}
2363
2364
2365static void mct_initDCT(struct MCTStatStruc *pMCTstat,
2366 struct DCTStatStruc *pDCTstat)
2367{
2368 u32 val;
2369 u8 err_code;
2370
2371 /* Config. DCT0 for Ganged or unganged mode */
2372 print_t("\tmct_initDCT: DCTInit_D 0\n");
2373 DCTInit_D(pMCTstat, pDCTstat, 0);
2374 if (pDCTstat->ErrCode == SC_FatalErr) {
2375 // Do nothing goto exitDCTInit; /* any fatal errors? */
2376 } else {
2377 /* Configure DCT1 if unganged and enabled*/
2378 if (!pDCTstat->GangedMode) {
2379 if ( pDCTstat->DIMMValidDCT[1] > 0) {
2380 print_t("\tmct_initDCT: DCTInit_D 1\n");
2381 err_code = pDCTstat->ErrCode; /* save DCT0 errors */
2382 pDCTstat->ErrCode = 0;
2383 DCTInit_D(pMCTstat, pDCTstat, 1);
2384 if (pDCTstat->ErrCode == 2) /* DCT1 is not Running */
2385 pDCTstat->ErrCode = err_code; /* Using DCT0 Error code to update pDCTstat.ErrCode */
2386 } else {
2387 val = 1 << DisDramInterface;
2388 Set_NB32(pDCTstat->dev_dct, 0x100 + 0x94, val);
2389 }
2390 }
2391 }
2392// exitDCTInit:
2393}
2394
2395
2396static void mct_DramInit(struct MCTStatStruc *pMCTstat,
2397 struct DCTStatStruc *pDCTstat, u8 dct)
2398{
2399 u32 val;
2400
2401 mct_BeforeDramInit_Prod_D(pMCTstat, pDCTstat);
2402 // FIXME: for rev A: mct_BeforeDramInit_D(pDCTstat, dct);
2403
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002404 /* Disable auto refresh before Dram init when in ganged mode (Erratum 278) */
2405 if (pDCTstat->LogicalCPUID & AMD_DR_LT_B2) {
2406 if (pDCTstat->GangedMode) {
2407 val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
2408 val |= 1 << DisAutoRefresh;
2409 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
2410 }
Marc Jones8ae8c882007-12-19 01:32:08 +00002411 }
2412
2413 mct_DramInit_Hw_D(pMCTstat, pDCTstat, dct);
2414
2415 /* Re-enable auto refresh after Dram init when in ganged mode
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002416 * to ensure both DCTs are in sync (Erratum 278)
Marc Jones8ae8c882007-12-19 01:32:08 +00002417 */
2418
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002419 if (pDCTstat->LogicalCPUID & AMD_DR_LT_B2) {
2420 if (pDCTstat->GangedMode) {
2421 do {
2422 val = Get_NB32(pDCTstat->dev_dct, 0x90 + (0x100 * dct));
2423 } while (!(val & (1 << InitDram)));
Marc Jones8ae8c882007-12-19 01:32:08 +00002424
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002425 WaitRoutine_D(50);
Marc Jones8ae8c882007-12-19 01:32:08 +00002426
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002427 val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
2428 val &= ~(1 << DisAutoRefresh);
2429 val |= 1 << DisAutoRefresh;
2430 val &= ~(1 << DisAutoRefresh);
2431 }
Marc Jones8ae8c882007-12-19 01:32:08 +00002432 }
2433}
2434
2435
2436static u8 mct_setMode(struct MCTStatStruc *pMCTstat,
2437 struct DCTStatStruc *pDCTstat)
2438{
2439 u8 byte;
2440 u8 bytex;
2441 u32 val;
2442 u32 reg;
2443
2444 byte = bytex = pDCTstat->DIMMValid;
2445 bytex &= 0x55; /* CHA DIMM pop */
2446 pDCTstat->DIMMValidDCT[0] = bytex;
2447
2448 byte &= 0xAA; /* CHB DIMM popa */
2449 byte >>= 1;
2450 pDCTstat->DIMMValidDCT[1] = byte;
2451
2452 if (byte != bytex) {
2453 pDCTstat->ErrStatus &= ~(1 << SB_DimmMismatchO);
2454 } else {
2455 if ( mctGet_NVbits(NV_Unganged) )
2456 pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
2457
2458 if (!(pDCTstat->ErrStatus & (1 << SB_DimmMismatchO))) {
2459 pDCTstat->GangedMode = 1;
2460 /* valid 128-bit mode population. */
2461 pDCTstat->Status |= 1 << SB_128bitmode;
2462 reg = 0x110;
2463 val = Get_NB32(pDCTstat->dev_dct, reg);
2464 val |= 1 << DctGangEn;
2465 Set_NB32(pDCTstat->dev_dct, reg, val);
2466 print_tx("setMode: DRAM Controller Select Low Register = ", val);
2467 }
2468 }
2469 return pDCTstat->ErrCode;
2470}
2471
2472
2473u32 Get_NB32(u32 dev, u32 reg)
2474{
2475 u32 addr;
2476
2477 addr = (dev>>4) | (reg & 0xFF) | ((reg & 0xf00)<<16);
2478 outl((1<<31) | (addr & ~3), 0xcf8);
2479
2480 return inl(0xcfc);
2481}
2482
2483
2484void Set_NB32(u32 dev, u32 reg, u32 val)
2485{
2486 u32 addr;
2487
2488 addr = (dev>>4) | (reg & 0xFF) | ((reg & 0xf00)<<16);
2489 outl((1<<31) | (addr & ~3), 0xcf8);
2490 outl(val, 0xcfc);
2491}
2492
2493
2494u32 Get_NB32_index(u32 dev, u32 index_reg, u32 index)
2495{
2496 u32 dword;
2497
2498 Set_NB32(dev, index_reg, index);
2499 dword = Get_NB32(dev, index_reg+0x4);
2500
2501 return dword;
2502}
2503
2504void Set_NB32_index(u32 dev, u32 index_reg, u32 index, u32 data)
2505{
2506 Set_NB32(dev, index_reg, index);
2507 Set_NB32(dev, index_reg + 0x4, data);
2508}
2509
2510
2511u32 Get_NB32_index_wait(u32 dev, u32 index_reg, u32 index)
2512{
2513
2514 u32 dword;
2515
2516
2517 index &= ~(1 << DctAccessWrite);
2518 Set_NB32(dev, index_reg, index);
2519 do {
2520 dword = Get_NB32(dev, index_reg);
2521 } while (!(dword & (1 << DctAccessDone)));
2522 dword = Get_NB32(dev, index_reg + 0x4);
2523
2524 return dword;
2525}
2526
2527
2528void Set_NB32_index_wait(u32 dev, u32 index_reg, u32 index, u32 data)
2529{
2530 u32 dword;
2531
2532
2533 Set_NB32(dev, index_reg + 0x4, data);
2534 index |= (1 << DctAccessWrite);
2535 Set_NB32(dev, index_reg, index);
2536 do {
2537 dword = Get_NB32(dev, index_reg);
2538 } while (!(dword & (1 << DctAccessDone)));
2539
2540}
2541
2542
static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstat, u8 dct)
{
	/* Get platform specific config/timing values from the interface layer
	 * and program them into DCT.
	 * In ganged mode both channels (0 and 1) are programmed; otherwise
	 * only the requested DCT.
	 * Returns pDCTstat->ErrCode.
	 */

	u32 dev = pDCTstat->dev_dct;
	u32 index_reg;
	u8 i, i_start, i_end;

	if (pDCTstat->GangedMode) {
		SyncSetting(pDCTstat);
		i_start = 0;
		i_end = 2;
	} else {
		i_start = dct;
		i_end = dct + 1;
	}
	for (i=i_start; i<i_end; i++) {
		index_reg = 0x98 + (i * 0x100);
		Set_NB32_index_wait(dev, index_reg, 0x00, pDCTstat->CH_ODC_CTL[i]); /* Output Driver Compensation Control */
		Set_NB32_index_wait(dev, index_reg, 0x04, pDCTstat->CH_ADDR_TMG[i]); /* Address Timing Control (fixed copy-paste comment) */
	}

	return pDCTstat->ErrCode;

}
2571
2572
2573static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat)
2574{
2575 u32 dev;
2576 u32 val;
2577
2578 if (pDCTstat->NodePresent) {
2579 print_tx("mct_SyncDCTsReady: Node ", pDCTstat->Node_ID);
2580 dev = pDCTstat->dev_dct;
2581
2582 if ((pDCTstat->DIMMValidDCT[0] ) || (pDCTstat->DIMMValidDCT[1])) { /* This Node has dram */
2583 do {
2584 val = Get_NB32(dev, 0x110);
2585 } while (!(val & (1 << DramEnabled)));
2586 print_t("mct_SyncDCTsReady: DramEnabled\n");
2587 }
2588 } /* Node is present */
2589}
2590
2591
2592static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
2593 struct DCTStatStruc *pDCTstat, u8 dct)
2594{
2595 if (!pDCTstat->GangedMode) {
2596 if (dct == 0 ) {
2597 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2598 if (pDCTstat->DIMMValidDCT[dct] == 0)
2599 pDCTstat->ErrCode = SC_StopError;
2600 } else {
2601 pDCTstat->CSPresent = 0;
2602 pDCTstat->CSTestFail = 0;
2603 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2604 if (pDCTstat->DIMMValidDCT[dct] == 0)
2605 pDCTstat->ErrCode = SC_StopError;
2606 }
2607 }
2608}
2609
2610static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
2611 struct DCTStatStruc *pDCTstat, u8 dct)
2612{
2613 u8 ret;
2614
2615 if ( dct == 0) {
2616 SPDCalcWidth_D(pMCTstat, pDCTstat);
2617 ret = mct_setMode(pMCTstat, pDCTstat);
2618 } else {
2619 ret = pDCTstat->ErrCode;
2620 }
2621
2622 print_tx("SPDCalcWidth: Status ", pDCTstat->Status);
2623 print_tx("SPDCalcWidth: ErrStatus ", pDCTstat->ErrStatus);
2624 print_tx("SPDCalcWidth: ErrCode ", pDCTstat->ErrCode);
2625 print_t("SPDCalcWidth: Done\n");
2626
2627 return ret;
2628}
2629
2630
static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstat, u8 dct)
{
	/* After per-DCT memory stitching: accumulate the node's system
	 * limit and, in unganged mode, program the DCT select registers
	 * (F2x110/F2x114) so both DCTs are mapped into the address space.
	 * Accounts for the software memory hole (NV_MemHole) when the DCT
	 * boundary crosses the hole base.
	 */
	u32 val;
	u32 dword;
	u32 dev;
	u32 reg;
	u8 _MemHoleRemap;
	u32 DramHoleBase;

	_MemHoleRemap = mctGet_NVbits(NV_MemHole);
	DramHoleBase = mctGet_NVbits(NV_BottomIO);
	DramHoleBase <<= 8;
	/* NV_BottomIO is in [31:24] units; shifting left by 8 scales it to
	 * [31:16] units (128MB granularity) to form the 'effective' bottom
	 * of IO space. */
	pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;

	/* In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
	if (!pDCTstat->GangedMode) {
		dev = pDCTstat->dev_dct;
		pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
		/* if DCT0 and DCT1 both exist, set DctSelBaseAddr[47:27] to the top of DCT0 */
		if (dct == 0) {
			if (pDCTstat->DIMMValidDCT[1] > 0) {
				dword = pDCTstat->DCTSysLimit + 1;
				dword += pDCTstat->NodeSysBase;
				dword >>= 8; /* scale [39:8] to [47:27],and to F2x110[31:11] */
				if ((dword >= DramHoleBase) && _MemHoleRemap) {
					/* boundary is above the hole: skip over the hole size */
					pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
					val = pMCTstat->HoleBase;
					val >>= 16;
					val = (((~val) & 0xFF) + 1);	/* hole size (two's complement) */
					val <<= 8;
					dword += val;
				}
				reg = 0x110;
				val = Get_NB32(dev, reg);
				/* NOTE(review): mask 0x7F keeps only the low 7 control
				 * bits of F2x110 before merging in the select base -
				 * verify field layout against the BKDG. */
				val &= 0x7F;
				val |= dword;
				val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
				Set_NB32(dev, reg, val);
				print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select Low Register = ", val);
				print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select High Register = ", dword);

				reg = 0x114;
				val = dword;
				Set_NB32(dev, reg, val);
			}
		} else {
			/* Program the DctSelBaseAddr value to 0
			   if DCT 0 is disabled */
			if (pDCTstat->DIMMValidDCT[0] == 0) {
				dword = pDCTstat->NodeSysBase;
				dword >>= 8;
				if ((dword >= DramHoleBase) && _MemHoleRemap) {
					pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
					val = pMCTstat->HoleBase;
					val >>= 8;
					val &= ~(0xFFFF);
					val |= (((~val) & 0xFFFF) + 1);
					dword += val;
				}
				reg = 0x114;
				val = dword;
				Set_NB32(dev, reg, val);

				reg = 0x110;
				/* NOTE(review): 'val' still holds the DctSelBaseAddr
				 * value here; F2x110 is written without a prior read
				 * (no read-modify-write), clobbering its other fields -
				 * confirm against the BKDG whether a Get_NB32 is
				 * missing. */
				val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
				Set_NB32(dev, reg, val);
				print_tx("AfterStitch DCT1 only: DRAM Controller Select Low Register = ", val);
				print_tx("AfterStitch DCT1 only: DRAM Controller Select High Register = ", dword);
			}
		}
	} else {
		pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
	}
	print_tx("AfterStitch pDCTstat->NodeSysBase = ", pDCTstat->NodeSysBase);
	print_tx("mct_AfterStitchMemory: pDCTstat->NodeSysLimit ", pDCTstat->NodeSysLimit);
}
2712
2713
2714static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
2715 struct DCTStatStruc *pDCTstat, u8 dct)
2716{
2717 u8 ret;
2718
2719 if ( dct == 0)
2720 ret = DIMMPresence_D(pMCTstat, pDCTstat);
2721 else
2722 ret = pDCTstat->ErrCode;
2723
2724 return ret;
2725}
2726
2727
2728/* mct_BeforeGetDIMMAddress inline in C */
2729
2730
2731static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
2732 struct DCTStatStruc *pDCTstatA)
2733{
2734 u8 Node;
2735
2736 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
2737 struct DCTStatStruc *pDCTstat;
2738 pDCTstat = pDCTstatA + Node;
2739 if (pDCTstat->NodePresent) {
2740 if (pDCTstat->DIMMValidDCT[0]) {
2741 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[0];
2742 Set_OtherTiming(pMCTstat, pDCTstat, 0);
2743 }
2744 if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode ) {
2745 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[1];
2746 Set_OtherTiming(pMCTstat, pDCTstat, 1);
2747 }
2748 } /* Node is present*/
2749 } /* while Node */
2750}
2751
2752
2753static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
2754 struct DCTStatStruc *pDCTstat, u8 dct)
2755{
2756 u32 reg;
2757 u32 reg_off = 0x100 * dct;
2758 u32 val;
2759 u32 dword;
2760 u32 dev = pDCTstat->dev_dct;
2761
2762 Get_Trdrd(pMCTstat, pDCTstat, dct);
2763 Get_Twrwr(pMCTstat, pDCTstat, dct);
2764 Get_Twrrd(pMCTstat, pDCTstat, dct);
2765 Get_TrwtTO(pMCTstat, pDCTstat, dct);
2766 Get_TrwtWB(pMCTstat, pDCTstat);
2767
2768 reg = 0x8C + reg_off; /* Dram Timing Hi */
2769 val = Get_NB32(dev, reg);
2770 val &= 0xffff0300;
2771 dword = pDCTstat->TrwtTO; //0x07
2772 val |= dword << 4;
2773 dword = pDCTstat->Twrrd; //0x03
2774 val |= dword << 10;
2775 dword = pDCTstat->Twrwr; //0x03
2776 val |= dword << 12;
2777 dword = pDCTstat->Trdrd; //0x03
2778 val |= dword << 14;
2779 dword = pDCTstat->TrwtWB; //0x07
2780 val |= dword;
2781 val = OtherTiming_A_D(pDCTstat, val);
2782 Set_NB32(dev, reg, val);
2783
2784}
2785
2786
static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstat, u8 dct)
{
	/* Compute the read-to-read turnaround (Trdrd) for this DCT and
	 * store it in pDCTstat->Trdrd.
	 * Mixed x4/x8 populations: derived from the DqsRcvEn gross-delay
	 * spread (CGDD). Uniform populations: 0 only when RdDqs timings
	 * match across DIMMs and the DqsRcvEn spread is under half a
	 * MEMCLK, else 1.
	 */
	u8 Trdrd;
	u8 byte;
	u32 dword;
	u32 val;
	u32 index_reg = 0x98 + 0x100 * dct;
	u32 dev = pDCTstat->dev_dct;

	if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0)) {
		/* mixed (x4 or x8) DIMM types
		  the largest DqsRcvEnGrossDelay of any DIMM minus the DqsRcvEnGrossDelay
		  of any other DIMM is equal to the Critical Gross Delay Difference (CGDD) for Trdrd.*/
		byte = Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
		if (byte == 0)
			Trdrd = 1;
		else
			Trdrd = 2;

	} else {
		/*
		 Trdrd with non-mixed DIMM types
		 RdDqsTime are the same for all DIMMs and DqsRcvEn difference between
		 any two DIMMs is less than half of a MEMCLK, BIOS should program Trdrd to 0000b,
		 else BIOS should program Trdrd to 0001b.

		 RdDqsTime are the same for all DIMMs
		 DDR400~DDR667 only use one set register
		 DDR800 have two set register for DIMM0 and DIMM1 */
		Trdrd = 1;
		if (pDCTstat->Speed > 3) {
			/* DIMM0+DIMM1 exist */ //NOTE it should be 5
			val = bsf(pDCTstat->DIMMValid);
			dword = bsr(pDCTstat->DIMMValid);
			/* more than one DIMM populated: compare DIMM0 vs DIMM1
			 * RdDqs timing registers; any difference forces Trdrd=1 */
			if (dword != val && dword != 0) {
				/* DCT Read DQS Timing Control - DIMM0 - Low */
				dword = Get_NB32_index_wait(dev, index_reg, 0x05);
				/* DCT Read DQS Timing Control - DIMM1 - Low */
				val = Get_NB32_index_wait(dev, index_reg, 0x105);
				if (val != dword)
					goto Trdrd_1;

				/* DCT Read DQS Timing Control - DIMM0 - High */
				dword = Get_NB32_index_wait(dev, index_reg, 0x06);
				/* DCT Read DQS Timing Control - DIMM1 - High */
				val = Get_NB32_index_wait(dev, index_reg, 0x106);
				if (val != dword)
					goto Trdrd_1;
			}
		}

		/* DqsRcvEn difference between any two DIMMs is
		   less than half of a MEMCLK */
		/* DqsRcvEn byte 1,0*/
		if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x10))
			goto Trdrd_1;
		/* DqsRcvEn byte 3,2*/
		if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x11))
			goto Trdrd_1;
		/* DqsRcvEn byte 5,4*/
		if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x20))
			goto Trdrd_1;
		/* DqsRcvEn byte 7,6*/
		if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x21))
			goto Trdrd_1;
		/* DqsRcvEn ECC*/
		if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x12))
			goto Trdrd_1;
		Trdrd = 0;	/* all checks passed */
	Trdrd_1:
		;
	}
	pDCTstat->Trdrd = Trdrd;

}
2863
2864
static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstat, u8 dct)
{
	/* Compute the write-to-write turnaround (Twrwr) for this DCT and
	 * store it in pDCTstat->Twrwr: 0 for DDR400~DDR667; for DDR800,
	 * 2 when the WrDatGross CGDD is zero, else 3. */
	u8 Twrwr = 0;
	u32 index_reg = 0x98 + 0x100 * dct;
	u32 dev = pDCTstat->dev_dct;
	u32 val;
	u32 dword;

	/* WrDatGrossDlyByte only use one set register when DDR400~DDR667
	   DDR800 have two set register for DIMM0 and DIMM1 */
	if (pDCTstat->Speed > 3) {
		val = bsf(pDCTstat->DIMMValid);	/* lowest populated DIMM */
		dword = bsr(pDCTstat->DIMMValid);	/* highest populated DIMM */
		if (dword != val && dword != 0) {
			/*the largest WrDatGrossDlyByte of any DIMM minus the
			  WrDatGrossDlyByte of any other DIMM is equal to CGDD */
			val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
		}
		/* NOTE(review): with a single populated DIMM the branch above
		 * is skipped and 'val' still holds the bsf() bit index, which
		 * is then compared against 0 here - looks unintended; confirm
		 * intended behavior for single-DIMM DDR800 configs. */
		if (val == 0)
			Twrwr = 2;
		else
			Twrwr = 3;
	}
	pDCTstat->Twrwr = Twrwr;
}
2891
2892
static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
			struct DCTStatStruc *pDCTstat, u8 dct)
{
	/* Compute the write-to-read turnaround (Twrrd) for this DCT and
	 * store it in pDCTstat->Twrrd (0, 1, or 2 depending on how much
	 * the max WrDatGross delay exceeds the max DqsRcvEn gross delay). */
	u8 byte, bytex, val;
	u32 index_reg = 0x98 + 0x100 * dct;
	u32 dev = pDCTstat->dev_dct;

	/* On any given byte lane, the largest WrDatGrossDlyByte delay of
	   any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
	   equal to the Critical Gross Delay Difference (CGDD) for Twrrd.*/

	/* WrDatGrossDlyByte only use one set register when DDR400~DDR667
	   DDR800 have two set register for DIMM0 and DIMM1 */
	if (pDCTstat->Speed > 3) {
		/* side effect: updates pDCTstat->WrDatGrossH (return unused) */
		val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
	} else {
		val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 1);	/* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
		pDCTstat->WrDatGrossH = (u8) val;	/* low byte = max value */
	}

	/* side effect: updates pDCTstat->DqsRcvEnGrossL */
	Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);

	bytex = pDCTstat->DqsRcvEnGrossL;
	byte = pDCTstat->WrDatGrossH;
	if (byte > bytex) {
		byte -= bytex;
		if (byte == 1)
			bytex = 1;
		else
			bytex = 2;
	} else {
		bytex = 0;
	}
	pDCTstat->Twrrd = bytex;
}
2928
2929
2930static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
2931 struct DCTStatStruc *pDCTstat, u8 dct)
2932{
2933 u8 byte, bytex;
2934 u32 index_reg = 0x98 + 0x100 * dct;
2935 u32 dev = pDCTstat->dev_dct;
2936
2937 /* On any given byte lane, the largest WrDatGrossDlyByte delay of
2938 any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
2939 equal to the Critical Gross Delay Difference (CGDD) for TrwtTO. */
2940 Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2941 Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2942 bytex = pDCTstat->DqsRcvEnGrossL;
2943 byte = pDCTstat->WrDatGrossH;
2944 if (bytex > byte) {
2945 bytex -= byte;
2946 if ((bytex == 1) || (bytex == 2))
2947 bytex = 3;
2948 else
2949 bytex = 4;
2950 } else {
2951 byte -= bytex;
2952 if ((byte == 0) || (byte == 1))
2953 bytex = 2;
2954 else
2955 bytex = 1;
2956 }
2957
2958 pDCTstat->TrwtTO = bytex;
2959}
2960
2961
2962static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
2963 struct DCTStatStruc *pDCTstat)
2964{
2965 /* TrwtWB ensures read-to-write data-bus turnaround.
2966 This value should be one more than the programmed TrwtTO.*/
2967 pDCTstat->TrwtWB = pDCTstat->TrwtTO + 1;
2968}
2969
2970
static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat,
					u8 dct, u32 dev, u32 index_reg,
					u32 index)
{
	/* Check whether the DqsRcvEn delay spread across all populated
	 * DIMMs, for the byte-lane pair selected by 'index', exceeds half
	 * a MEMCLK (a delta of 31 units).
	 * Returns 1 when the spread is too large, 0 otherwise.
	 * index 0x12 selects the ECC lane, which only carries one byte.
	 */
	u8 Smallest_0, Largest_0, Smallest_1, Largest_1;
	u8 i;
	u32 val;
	u8 byte;
	u8 ecc_reg = 0;

	Smallest_0 = 0xFF;
	Smallest_1 = 0xFF;
	Largest_0 = 0;
	Largest_1 = 0;

	if (index == 0x12)
		ecc_reg = 1;

	/* visit each populated even DIMM slot; the per-DIMM DqsRcvEn
	 * registers are spaced 3 index values apart */
	for (i=0; i < 8; i+=2) {
		if ( pDCTstat->DIMMValid & (1 << i)) {
			val = Get_NB32_index_wait(dev, index_reg, index);
			byte = val & 0xFF;	/* first lane of the pair */
			if (byte < Smallest_0)
				Smallest_0 = byte;
			if (byte > Largest_0)
				Largest_0 = byte;
			if (!(ecc_reg)) {
				byte = (val >> 16) & 0xFF;	/* second lane of the pair */
				if (byte < Smallest_1)
					Smallest_1 = byte;
				if (byte > Largest_1)
					Largest_1 = byte;
			}
		}
		index += 3;
	}	/* while ++i */

	/* check if total DqsRcvEn delay difference between any
	   two DIMMs is less than half of a MEMCLK */
	if ((Largest_0 - Smallest_0) > 31)
		return 1;
	if (!(ecc_reg))
		if ((Largest_1 - Smallest_1) > 31)
			return 1;
	return 0;
}
3017
3018
3019static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
3020 u32 dev, u32 index_reg)
3021{
3022 u8 Smallest, Largest;
3023 u32 val;
3024 u8 byte, bytex;
3025
3026 /* The largest DqsRcvEnGrossDelay of any DIMM minus the
3027 DqsRcvEnGrossDelay of any other DIMM is equal to the Critical
3028 Gross Delay Difference (CGDD) */
3029 /* DqsRcvEn byte 1,0 */
3030 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x10);
3031 Largest = val & 0xFF;
3032 Smallest = (val >> 8) & 0xFF;
3033
3034 /* DqsRcvEn byte 3,2 */
3035 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x11);
3036 byte = val & 0xFF;
3037 bytex = (val >> 8) & 0xFF;
3038 if (bytex < Smallest)
3039 Smallest = bytex;
3040 if (byte > Largest)
3041 Largest = byte;
3042
3043 /* DqsRcvEn byte 5,4 */
3044 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x20);
3045 byte = val & 0xFF;
3046 bytex = (val >> 8) & 0xFF;
3047 if (bytex < Smallest)
3048 Smallest = bytex;
3049 if (byte > Largest)
3050 Largest = byte;
3051
3052 /* DqsRcvEn byte 7,6 */
3053 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x21);
3054 byte = val & 0xFF;
3055 bytex = (val >> 8) & 0xFF;
3056 if (bytex < Smallest)
3057 Smallest = bytex;
3058 if (byte > Largest)
3059 Largest = byte;
3060
3061 if (pDCTstat->DimmECCPresent> 0) {
3062 /*DqsRcvEn Ecc */
3063 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x12);
3064 byte = val & 0xFF;
3065 bytex = (val >> 8) & 0xFF;
3066 if (bytex < Smallest)
3067 Smallest = bytex;
3068 if (byte > Largest)
3069 Largest = byte;
3070 }
3071
3072 pDCTstat->DqsRcvEnGrossL = Largest;
3073 return Largest - Smallest;
3074}
3075
3076
3077static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat,
3078 u8 dct, u32 dev, u32 index_reg)
3079{
3080 u8 Smallest, Largest;
3081 u32 val;
3082 u8 byte, bytex;
3083
3084 /* The largest WrDatGrossDlyByte of any DIMM minus the
3085 WrDatGrossDlyByte of any other DIMM is equal to CGDD */
3086 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x01); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
3087 Largest = val & 0xFF;
3088 Smallest = (val >> 8) & 0xFF;
3089 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x101); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM1 */
3090 byte = val & 0xFF;
3091 bytex = (val >> 8) & 0xFF;
3092 if (bytex < Smallest)
3093 Smallest = bytex;
3094 if (byte > Largest)
3095 Largest = byte;
3096
3097 // FIXME: Add Cx support.
3098
3099 pDCTstat->WrDatGrossH = Largest;
3100 return Largest - Smallest;
3101}
3102
/* Scan the 3-bit DqsRcvEn gross-delay fields of every populated DIMM,
 * starting at PHY CSR 'index' and stepping by 3 to the matching register
 * of the next DIMM.  Returns (Smallest << 8) | Largest of the values
 * seen.  Index 0x12 addresses the ECC lane, which carries only the low
 * half, so the upper field is skipped for it.
 */
static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
					u32 dev, u32 index_reg,
					u32 index)
{
	u8 Smallest, Largest;
	u8 i;
	u8 byte;
	u32 val;
	u16 word;
	u8 ecc_reg = 0;	/* set when 'index' addresses the ECC lane */

	Smallest = 7;	/* maximum possible 3-bit gross value */
	Largest = 0;

	if (index == 0x12)
		ecc_reg = 1;

	/* one DIMM per even bit of DIMMValid */
	for (i=0; i < 8; i+=2) {
		if ( pDCTstat->DIMMValid & (1 << i)) {
			val = Get_NB32_index_wait(dev, index_reg, index);
			/* keep only the gross fields (bits 7:5 and 23:21) */
			val &= 0x00E000E0;
			byte = (val >> 5) & 0xFF;	/* low lane, 0..7 */
			if (byte < Smallest)
				Smallest = byte;
			if (byte > Largest)
				Largest = byte;
			if (!(ecc_reg)) {
				byte = (val >> (16 + 5)) & 0xFF;	/* high lane */
				if (byte < Smallest)
					Smallest = byte;
				if (byte > Largest)
					Largest = byte;
			}
		}
		index += 3;	/* same register of the next DIMM */
	} /* while ++i */

	/* pack: Smallest in the high byte, Largest in the low byte */
	word = Smallest;
	word <<= 8;
	word |= Largest;

	return word;
}
3146
/* Scan the 2-bit WrDatGrossDlyByte fields of one DIMM (two consecutive
 * PHY CSRs of four byte lanes each, starting at 'index'), plus the ECC
 * lane when present, and return (Smallest << 8) | Largest of the gross
 * values seen.
 */
static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat,
					u8 dct, u32 dev, u32 index_reg,
					u32 index)
{
	u8 Smallest, Largest;
	u8 i, j;
	u32 val;
	u8 byte;
	u16 word;

	Smallest = 3;	/* maximum possible 2-bit gross value */
	Largest = 0;
	for (i=0; i < 2; i++) {
		val = Get_NB32_index_wait(dev, index_reg, index);
		/* keep the gross bits (6:5) of each byte lane, then align
		 * each field to bit 0 of its lane */
		val &= 0x60606060;
		val >>= 5;
		for (j=0; j < 4; j++) {
			byte = val & 0xFF;	/* one lane's gross value, 0..3 */
			if (byte < Smallest)
				Smallest = byte;
			if (byte > Largest)
				Largest = byte;
			val >>= 8;
		} /* while ++j */
		index++;
	} /*while ++i*/

	if (pDCTstat->DimmECCPresent > 0) {
		/* ECC write-timing CSR follows the two data CSRs */
		index++;
		val = Get_NB32_index_wait(dev, index_reg, index);
		val &= 0x00000060;
		val >>= 5;
		byte = val & 0xFF;
		if (byte < Smallest)
			Smallest = byte;
		if (byte > Largest)
			Largest = byte;
	}

	/* pack: Smallest in the high byte, Largest in the low byte */
	word = Smallest;
	word <<= 8;
	word |= Largest;

	return word;
}
3192
3193
3194
static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstat)
{
	/* Final cleanup after memory init: clear the ClLinesToNb and
	 * WbEnhWsbDis workaround bits that mct_InitialMCT_D set. */
	print_t("\tmct_FinalMCT_D: Clr Cl, Wb\n");

	mct_ClrClToNB_D(pMCTstat, pDCTstat);
	mct_ClrWbEnhWsbDis_D(pMCTstat, pDCTstat);
}
3204
3205
static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat)
{
	/* Pre-init setup: set the ClLinesToNb and WbEnhWsbDis workaround
	 * bits; mct_FinalMCT_D clears them once init completes. */
	print_t("\tmct_InitialMCT_D: Set Cl, Wb\n");

	mct_SetClToNB_D(pMCTstat, pDCTstat);
	mct_SetWbEnhWsbDis_D(pMCTstat, pDCTstat);
}
3212
3213
3214static u32 mct_NodePresent_D(void)
3215{
3216 u32 val;
3217 val = 0x12001022;
3218 return val;
3219}
3220
3221
3222static void mct_init(struct MCTStatStruc *pMCTstat,
3223 struct DCTStatStruc *pDCTstat)
3224{
3225 u32 lo, hi;
3226 u32 addr;
3227
3228 pDCTstat->GangedMode = 0;
3229 pDCTstat->DRPresent = 1;
3230
3231 /* enable extend PCI configuration access */
3232 addr = 0xC001001F;
3233 _RDMSR(addr, &lo, &hi);
3234 if (hi & (1 << (46-32))) {
3235 pDCTstat->Status |= 1 << SB_ExtConfig;
3236 } else {
3237 hi |= 1 << (46-32);
3238 _WRMSR(addr, lo, hi);
3239 }
3240}
3241
3242
3243static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
3244 struct DCTStatStruc *pDCTstat)
3245{
3246 u32 reg;
3247 u32 val;
3248 u32 dev = pDCTstat->dev_dct;
3249
3250 /* Clear Legacy BIOS Mode bit */
3251 reg = 0x94;
3252 val = Get_NB32(dev, reg);
3253 val &= ~(1<<LegacyBiosMode);
3254 Set_NB32(dev, reg, val);
3255}
3256
3257
/* Mirror each node's DRAM base/limit from the Node0 address map
 * (F1x40/F1x44 + 8*Node) into that node's extended map registers
 * F1x120/F1x124, and propagate the hardware memory-hole hoist (F1xF0)
 * when a hole is configured.
 */
static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstatA)
{
	u8 Node;
	u32 Drambase, Dramlimit;
	u32 val;
	u32 reg;
	u32 dev;
	u32 devx;
	u32 dword;
	struct DCTStatStruc *pDCTstat;

	/* all base/limit pairs are read from Node0's map function */
	pDCTstat = pDCTstatA + 0;
	dev = pDCTstat->dev_map;

	/* Copy dram map from F1x40/44,F1x48/4c,
	  to F1x120/124(Node0),F1x120/124(Node1),...*/
	for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
		pDCTstat = pDCTstatA + Node;
		devx = pDCTstat->dev_map;

		/* get base/limit from Node0 */
		reg = 0x40 + (Node << 3);		/* Node0/Dram Base 0 */
		val = Get_NB32(dev, reg);
		/* re-align DramBase[39:27] for the extended register */
		Drambase = val >> ( 16 + 3);

		reg = 0x44 + (Node << 3);		/* Node0/Dram Limit 0 */
		val = Get_NB32(dev, reg);
		Dramlimit = val >> (16 + 3);

		/* set base/limit to F1x120/124 per Node */
		if (pDCTstat->NodePresent) {
			reg = 0x120;		/* F1x120,DramBase[47:27] */
			val = Get_NB32(devx, reg);
			val &= 0xFFE00000;
			val |= Drambase;
			Set_NB32(devx, reg, val);

			reg = 0x124;		/* F1x124,DramLimit[47:27] */
			val = Get_NB32(devx, reg);
			val &= 0xFFE00000;
			val |= Dramlimit;
			Set_NB32(devx, reg, val);

			/* hoist the memory hole base into F1xF0 when active */
			if ( pMCTstat->GStatus & ( 1 << GSB_HWHole)) {
				reg = 0xF0;
				val = Get_NB32(devx, reg);
				val |= (1 << DramMemHoistValid);
				val &= ~(0xFF << 24);
				dword = (pMCTstat->HoleBase >> (24 - 8)) & 0xFF;
				dword <<= 24;
				val |= dword;
				Set_NB32(devx, reg, val);
			}

		}
	}
}
3316
3317static void SetCSTriState(struct MCTStatStruc *pMCTstat,
3318 struct DCTStatStruc *pDCTstat, u8 dct)
3319{
3320 u32 val;
3321 u32 dev = pDCTstat->dev_dct;
3322 u32 index_reg = 0x98 + 0x100 * dct;
3323 u8 cs;
3324 u32 index;
3325 u16 word;
3326
3327 /* Tri-state unused chipselects when motherboard
3328 termination is available */
3329
3330 // FIXME: skip for Ax
3331
3332 word = pDCTstat->CSPresent;
3333 if (pDCTstat->Status & (1 << SB_Registered)) {
3334 for (cs = 0; cs < 8; cs++) {
3335 if (word & (1 << cs)) {
3336 if (!(cs & 1))
3337 word |= 1 << (cs + 1);
3338 }
3339 }
3340 }
3341 word = (~word) & 0xFF;
3342 index = 0x0c;
3343 val = Get_NB32_index_wait(dev, index_reg, index);
3344 val |= word;
3345 Set_NB32_index_wait(dev, index_reg, index, val);
3346}
3347
3348
3349
3350static void SetCKETriState(struct MCTStatStruc *pMCTstat,
3351 struct DCTStatStruc *pDCTstat, u8 dct)
3352{
3353 u32 val;
3354 u32 dev;
3355 u32 index_reg = 0x98 + 0x100 * dct;
3356 u8 cs;
3357 u32 index;
3358 u16 word;
3359
3360 /* Tri-state unused CKEs when motherboard termination is available */
3361
3362 // FIXME: skip for Ax
3363
3364 dev = pDCTstat->dev_dct;
3365 word = 0x101;
3366 for (cs = 0; cs < 8; cs++) {
3367 if (pDCTstat->CSPresent & (1 << cs)) {
3368 if (!(cs & 1))
3369 word &= 0xFF00;
3370 else
3371 word &= 0x00FF;
3372 }
3373 }
3374
3375 index = 0x0c;
3376 val = Get_NB32_index_wait(dev, index_reg, index);
3377 if ((word & 0x00FF) == 1)
3378 val |= 1 << 12;
3379 else
3380 val &= ~(1 << 12);
3381
3382 if ((word >> 8) == 1)
3383 val |= 1 << 13;
3384 else
3385 val &= ~(1 << 13);
3386
3387 Set_NB32_index_wait(dev, index_reg, index, val);
3388}
3389
3390
3391static void SetODTTriState(struct MCTStatStruc *pMCTstat,
3392 struct DCTStatStruc *pDCTstat, u8 dct)
3393{
3394 u32 val;
3395 u32 dev;
3396 u32 index_reg = 0x98 + 0x100 * dct;
3397 u8 cs;
3398 u32 index;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003399 u8 odt;
3400 u8 max_dimms;
Marc Jones8ae8c882007-12-19 01:32:08 +00003401
3402 // FIXME: skip for Ax
3403
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003404 /* Tri-state unused ODTs when motherboard termination is available */
3405 max_dimms = (u8) mctGet_NVbits(NV_MAX_DIMMS);
3406 odt = 0x0F; /* tristate all the pins then clear the used ones. */
3407
Marc Jones8ae8c882007-12-19 01:32:08 +00003408 for (cs = 0; cs < 8; cs += 2) {
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003409 if (pDCTstat->CSPresent & (1 << cs)) {
3410 odt &= ~(1 << (cs / 2));
3411
3412 /* if quad-rank capable platform clear adtitional pins */
3413 if (max_dimms != MAX_CS_SUPPORTED) {
3414 if (pDCTstat->CSPresent & (1 << (cs + 1)))
3415 odt &= ~(4 << (cs / 2));
3416 }
Marc Jones8ae8c882007-12-19 01:32:08 +00003417 }
3418 }
3419
3420 index = 0x0C;
3421 val = Get_NB32_index_wait(dev, index_reg, index);
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003422 val |= (odt << 8);
Marc Jones8ae8c882007-12-19 01:32:08 +00003423 Set_NB32_index_wait(dev, index_reg, index, val);
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003424
Marc Jones8ae8c882007-12-19 01:32:08 +00003425}
3426
3427
/* Program the DRAM PHY compensation override CSR (F2x[1,0]9C_x0A) from
 * the values the PHY measured in F2x[1,0]9C_x00.  Six 5-bit fields are
 * assembled: fields 0/4 and 1/5 come from the 1.5x rise/fall slew
 * tables indexed by bits 17:16 of the measurement, fields 2 and 3 from
 * the 2.0x tables indexed by bits 9:8.
 */
static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstat, u8 dct)
{
	u8 i;
	u32 index_reg = 0x98 + 0x100 * dct;
	u32 dev = pDCTstat->dev_dct;
	u32 val;
	u32 valx = 0;
	u32 dword;
	const u8 *p;

	val = Get_NB32_index_wait(dev, index_reg, 0x00);
	dword = 0;
	for (i=0; i < 6; i++) {
		switch (i) {
		case 0:
		case 4:
			p = Table_Comp_Rise_Slew_15x;
			valx = p[(val >> 16) & 3];
			break;
		case 1:
		case 5:
			p = Table_Comp_Fall_Slew_15x;
			valx = p[(val >> 16) & 3];
			break;
		case 2:
			p = Table_Comp_Rise_Slew_20x;
			valx = p[(val >> 8) & 3];
			break;
		case 3:
			p = Table_Comp_Fall_Slew_20x;
			valx = p[(val >> 8) & 3];
			break;

		}
		/* each looked-up value occupies a 5-bit field */
		dword |= valx << (5 * i);
	}

	/* Override/Exception */
	if (!pDCTstat->GangedMode) {
		i = 0; /* use i for the dct setting required */
		if (pDCTstat->MAdimms[0] < 4)
			i = 1;
		/* at 533/667 MHz with 4 devices on the address bus, mask
		 * off part of the computed compensation fields */
		if (((pDCTstat->Speed == 2) || (pDCTstat->Speed == 3)) && (pDCTstat->MAdimms[i] == 4))
			dword &= 0xF18FFF18;
		index_reg = 0x98; /* force dct = 0 */
	}

	Set_NB32_index_wait(dev, index_reg, 0x0a, dword);
}
3478
3479
3480static void WaitRoutine_D(u32 time)
3481{
3482 while(time) {
3483 _EXECFENCE;
3484 time--;
3485 }
3486}
3487
3488
3489static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
3490 struct DCTStatStruc *pDCTstat)
3491{
3492 u32 reg;
3493 u32 val;
3494 u32 dev = pDCTstat->dev_dct;
3495
3496 /* GhEnhancement #18429 modified by askar: For low NB CLK :
3497 * Memclk ratio, the DCT may need to arbitrate early to avoid
3498 * unnecessary bubbles.
3499 * bit 19 of F2x[1,0]78 Dram Control Register, set this bit only when
3500 * NB CLK : Memclk ratio is between 3:1 (inclusive) to 4:5 (inclusive)
3501 */
3502
3503 reg = 0x78;
3504 val = Get_NB32(dev, reg);
3505
3506 //FIXME: check for Cx
3507 if (CheckNBCOFEarlyArbEn(pMCTstat, pDCTstat))
3508 val |= (1 << EarlyArbEn);
3509
3510 Set_NB32(dev, reg, val);
3511
3512}
3513
3514
3515static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
3516 struct DCTStatStruc *pDCTstat)
3517{
3518 u32 reg;
3519 u32 val;
3520 u32 tmp;
3521 u32 rem;
3522 u32 dev = pDCTstat->dev_dct;
3523 u32 hi, lo;
3524 u8 NbDid = 0;
3525
3526 /* Check if NB COF >= 4*Memclk, if it is not, return a fatal error
3527 */
3528
3529 /* 3*(Fn2xD4[NBFid]+4)/(2^NbDid)/(3+Fn2x94[MemClkFreq]) */
3530 _RDMSR(0xC0010071, &lo, &hi);
3531 if (lo & (1 << 22))
3532 NbDid |= 1;
3533
3534
3535 reg = 0x94;
3536 val = Get_NB32(dev, reg);
3537 if (!(val & (1 << MemClkFreqVal)))
3538 val = Get_NB32(dev, reg * 0x100); /* get the DCT1 value */
3539
3540 val &= 0x07;
3541 val += 3;
3542 if (NbDid)
3543 val <<= 1;
3544 tmp = val;
3545
3546 dev = pDCTstat->dev_nbmisc;
3547 reg = 0xD4;
3548 val = Get_NB32(dev, reg);
3549 val &= 0x1F;
3550 val += 3;
3551 val *= 3;
3552 val = val / tmp;
3553 rem = val % tmp;
3554 tmp >>= 1;
3555
3556 // Yes this could be nicer but this was how the asm was....
3557 if (val < 3) { /* NClk:MemClk < 3:1 */
3558 return 0;
3559 } else if (val > 4) { /* NClk:MemClk >= 5:1 */
3560 return 0;
3561 } else if ((val == 4) && (rem > tmp)) { /* NClk:MemClk > 4.5:1 */
3562 return 0;
3563 } else {
3564 return 1; /* 3:1 <= NClk:MemClk <= 4.5:1*/
3565 }
3566}
3567
3568
/* Clear the MCT status structure and each node's DCT status structure,
 * preserving per-node HostBiosSrvc1/HostBiosSrvc2 and the byte range
 * between CH_MaxRdLat[2] and CH_D_BC_RCVRDLY[2][4] (fields that must
 * survive the reset).
 */
static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
					struct DCTStatStruc *pDCTstatA)
{
	u8 Node;
	u32 i;
	struct DCTStatStruc *pDCTstat;
	u16 start, stop;
	u8 *p;
	u16 host_serv1, host_serv2;

	/* Initialize Data structures by clearing all entries to 0 */
	p = (u8 *) pMCTstat;
	for (i = 0; i < sizeof(struct MCTStatStruc); i++) {
		p[i] = 0;
	}

	for (Node = 0; Node < 8; Node++) {
		pDCTstat = pDCTstatA + Node;
		/* save the two fields that must survive the wipe */
		host_serv1 = pDCTstat->HostBiosSrvc1;
		host_serv2 = pDCTstat->HostBiosSrvc2;

		p = (u8 *) pDCTstat;
		/* clear every byte before CH_MaxRdLat[2] ...
		 * (the &((struct ...*)0)-> expressions are hand-rolled
		 * offsetof() computations) */
		start = 0;
		stop = ((u16) &((struct DCTStatStruc *)0)->CH_MaxRdLat[2]);
		for (i = start; i < stop ; i++) {
			p[i] = 0;
		}

		/* ... and every byte from CH_D_BC_RCVRDLY[2][4] onward,
		 * leaving the region in between untouched */
		start = ((u16) &((struct DCTStatStruc *)0)->CH_D_BC_RCVRDLY[2][4]);
		stop = sizeof(struct DCTStatStruc);
		for (i = start; i < stop; i++) {
			p[i] = 0;
		}
		pDCTstat->HostBiosSrvc1 = host_serv1;
		pDCTstat->HostBiosSrvc2 = host_serv2;
	}
}
3606
3607
3608static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
3609 struct DCTStatStruc *pDCTstat)
3610{
3611 u8 i;
3612 u32 reg_off;
3613 u32 dev = pDCTstat->dev_dct;
3614
3615 // FIXME: skip for Ax
3616 if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533Mhz
3617 for (i=0; i < 2; i++) {
3618 reg_off = 0x100 * i;
3619 Set_NB32(dev, 0x98 + reg_off, 0x0D000030);
3620 Set_NB32(dev, 0x9C + reg_off, 0x00000806);
3621 Set_NB32(dev, 0x98 + reg_off, 0x4D040F30);
3622 }
3623 }
3624}
3625
3626
3627void mct_AdjustDelayRange_D(struct MCTStatStruc *pMCTstat,
3628 struct DCTStatStruc *pDCTstat, u8 *dqs_pos)
3629{
3630 // FIXME: Skip for Ax
3631 if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533Mhz
3632 *dqs_pos = 32;
3633 }
3634}
3635
3636
3637void mct_SetClToNB_D(struct MCTStatStruc *pMCTstat,
3638 struct DCTStatStruc *pDCTstat)
3639{
3640 u32 lo, hi;
3641 u32 msr;
3642
3643 // FIXME: Maybe check the CPUID? - not for now.
3644 // pDCTstat->LogicalCPUID;
3645
3646 msr = BU_CFG2;
3647 _RDMSR(msr, &lo, &hi);
3648 lo |= 1 << ClLinesToNbDis;
3649 _WRMSR(msr, lo, hi);
3650}
3651
3652
3653void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
3654 struct DCTStatStruc *pDCTstat)
3655{
3656
3657 u32 lo, hi;
3658 u32 msr;
3659
3660 // FIXME: Maybe check the CPUID? - not for now.
3661 // pDCTstat->LogicalCPUID;
3662
3663 msr = BU_CFG2;
3664 _RDMSR(msr, &lo, &hi);
3665 if (!pDCTstat->ClToNB_flag)
3666 lo &= ~(1<<ClLinesToNbDis);
3667 _WRMSR(msr, lo, hi);
3668
3669}
3670
3671
3672void mct_SetWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3673 struct DCTStatStruc *pDCTstat)
3674{
3675 u32 lo, hi;
3676 u32 msr;
3677
3678 // FIXME: Maybe check the CPUID? - not for now.
3679 // pDCTstat->LogicalCPUID;
3680
3681 msr = BU_CFG;
3682 _RDMSR(msr, &lo, &hi);
3683 hi |= (1 << WbEnhWsbDis_D);
3684 _WRMSR(msr, lo, hi);
3685}
3686
3687
3688void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3689 struct DCTStatStruc *pDCTstat)
3690{
3691 u32 lo, hi;
3692 u32 msr;
3693
3694 // FIXME: Maybe check the CPUID? - not for now.
3695 // pDCTstat->LogicalCPUID;
3696
3697 msr = BU_CFG;
3698 _RDMSR(msr, &lo, &hi);
3699 hi &= ~(1 << WbEnhWsbDis_D);
3700 _WRMSR(msr, lo, hi);
3701}
3702
3703
void mct_SetDramConfigHi_D(struct DCTStatStruc *pDCTstat, u32 dct,
				u32 DramConfigHi)
{
	/* Bug#15114: Comp. update interrupted by Freq. change can cause
	 * subsequent update to be invalid during any MemClk frequency change:
	 * Solution: From the bug report:
	 * 1. A software-initiated frequency change should be wrapped into the
	 * following sequence :
	 *	a) Disable Compensation (F2[1, 0]9C_x08[30] )
	 *	b) Reset the Begin Compensation bit (D3CMP->COMP_CONFIG[0]) in all the compensation engines
	 *	c) Do frequency change
	 *	d) Enable Compensation (F2[1, 0]9C_x08[30] )
	 * 2. A software-initiated Disable Compensation should always be
	 * followed by step b) of the above steps.
	 * Silicon Status: Fixed In Rev B0
	 *
	 * Errata#177: DRAM Phy Automatic Compensation Updates May Be Invalid
	 * Solution: BIOS should disable the phy automatic compensation prior
	 * to initiating a memory clock frequency change as follows:
	 * 1. Disable PhyAutoComp by writing 1'b1 to F2x[1, 0]9C_x08[30]
	 * 2. Reset the Begin Compensation bits by writing 32'h0 to
	 * F2x[1, 0]9C_x4D004F00
	 * 3. Perform frequency change
	 * 4. Enable PhyAutoComp by writing 1'b0 to F2x[1, 0]9C_08[30]
	 * In addition, any time software disables the automatic phy
	 * compensation it should reset the begin compensation bit per step 2.
	 * Silicon Status: Fixed in DR-B0
	 */

	u32 dev = pDCTstat->dev_dct;
	u32 index_reg = 0x98 + 0x100 * dct;
	u32 index;

	u32 val;

	/* step 1: disable phy automatic compensation */
	index = 0x08;
	val = Get_NB32_index_wait(dev, index_reg, index);
	Set_NB32_index_wait(dev, index_reg, index, val | (1 << DisAutoComp));

	//FIXME: check for Bx Cx CPU
	// if Ax mct_SetDramConfigHi_Samp_D

	/* step 2 (errata#177): reset the Begin Compensation bits */
	/* NOTE(review): the errata text above says x4D004F00 but the code
	 * uses x4D014F00 - confirm the intended debug-register index. */
	index = 0x4D014F00;	/* F2x[1, 0]9C_x[D0FFFFF:D000000] DRAM Phy Debug Registers */
	index |= 1 << DctAccessWrite;
	val = 0;
	Set_NB32_index_wait(dev, index_reg, index, val);

	/* step 3: perform the frequency change by writing F2x[1,0]94 */
	Set_NB32(dev, 0x94 + 0x100 * dct, DramConfigHi);

	/* step 4: re-enable phy automatic compensation */
	index = 0x08;
	val = Get_NB32_index_wait(dev, index_reg, index);
	Set_NB32_index_wait(dev, index_reg, index, val & (~(1 << DisAutoComp)));
}
3758
3759static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
3760 struct DCTStatStruc *pDCTstatA)
3761{
3762 u8 Node;
3763 struct DCTStatStruc *pDCTstat;
3764
3765 /* Errata 178
3766 *
3767 * Bug#15115: Uncertainty In The Sync Chain Leads To Setup Violations
3768 * In TX FIFO
3769 * Solution: BIOS should program DRAM Control Register[RdPtrInit] =
3770 * 5h, (F2x[1, 0]78[3:0] = 5h).
3771 * Silicon Status: Fixed In Rev B0
3772 *
3773 * Bug#15880: Determine validity of reset settings for DDR PHY timing.
3774 * Solutiuon: At least, set WrDqs fine delay to be 0 for DDR2 training.
3775 */
3776
3777 for (Node = 0; Node < 8; Node++) {
3778 pDCTstat = pDCTstatA + Node;
3779
3780 if (pDCTstat->NodePresent)
3781 mct_BeforeDQSTrain_Samp_D(pMCTstat, pDCTstat);
3782 mct_ResetDLL_D(pMCTstat, pDCTstat, 0);
3783 mct_ResetDLL_D(pMCTstat, pDCTstat, 1);
3784
3785 }
3786}
3787
/* DLL reset sequence for one DCT (invoked from mct_BeforeDQSTrain_D):
 * find the first enabled receiver pair with a valid system address,
 * prime the cache with one test-pattern read, then pulse PHY CSR
 * F2x[1,0]9C_xD080F0C (write 0x8000, wait >= 300 ns, write 0, wait
 * >= 2 us).  HWCR.wrap32dis is set around the sequence so the 64-bit
 * read works from 32-bit mode, and restored afterwards.  Skipped on
 * rev B3 silicon.
 */
static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstat, u8 dct)
{
	u8 Receiver;
	u32 dev = pDCTstat->dev_dct;
	u32 reg_off = 0x100 * dct;
	u32 addr;
	u32 lo, hi;
	u8 wrap32dis = 0;
	u8 valid = 0;

	/* Skip reset DLL for B3 */
	if (pDCTstat->LogicalCPUID & AMD_DR_B3) {
		return;
	}

	addr = HWCR;
	_RDMSR(addr, &lo, &hi);
	if(lo & (1<<17)) { /* save the old value */
		wrap32dis = 1;
	}
	lo |= (1<<17); /* HWCR.wrap32dis */
	lo &= ~(1<<15); /* SSEDIS */
	/* Setting wrap32dis allows 64-bit memory references in 32bit mode */
	_WRMSR(addr, lo, hi);


	pDCTstat->Channel = dct;
	Receiver = mct_InitReceiver_D(pDCTstat, dct);
	/* there are four receiver pairs, loosely associated with chipselects.*/
	for (; Receiver < 8; Receiver += 2) {
		if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, dct, Receiver)) {
			addr = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, dct, Receiver, &valid);
			if (valid) {
				mct_Read1LTestPattern_D(pMCTstat, pDCTstat, addr);	/* cache fills */

				/* Write 0000_8000h to register F2x[1,0]9C_xD080F0C */
				Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00008000);
				mct_Wait(80); /* wait >= 300ns */

				/* Write 0000_0000h to register F2x[1,0]9C_xD080F0C */
				Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00000000);
				mct_Wait(800); /* wait >= 2us */
				/* one valid receiver is enough for the DLL reset */
				break;
			}
		}
	}
	if(!wrap32dis) {
		addr = HWCR;
		_RDMSR(addr, &lo, &hi);
		lo &= ~(1<<17); /* restore HWCR.wrap32dis */
		_WRMSR(addr, lo, hi);
	}
}
3842
3843
3844static void mct_EnableDatIntlv_D(struct MCTStatStruc *pMCTstat,
3845 struct DCTStatStruc *pDCTstat)
3846{
3847 u32 dev = pDCTstat->dev_dct;
3848 u32 val;
3849
3850 /* Enable F2x110[DctDatIntlv] */
3851 // Call back not required mctHookBeforeDatIntlv_D()
3852 // FIXME Skip for Ax
3853 if (!pDCTstat->GangedMode) {
3854 val = Get_NB32(dev, 0x110);
3855 val |= 1 << 5; // DctDatIntlv
3856 Set_NB32(dev, 0x110, val);
3857
3858 // FIXME Skip for Cx
3859 dev = pDCTstat->dev_nbmisc;
3860 val = Get_NB32(dev, 0x8C); // NB Configuration Hi
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003861 val |= 1 << (36-32); // DisDatMask
Marc Jones8ae8c882007-12-19 01:32:08 +00003862 Set_NB32(dev, 0x8C, val);
3863 }
3864}
3865
3866
static void mct_SetupSync_D(struct MCTStatStruc *pMCTstat,
				struct DCTStatStruc *pDCTstat)
{
	/* set F2x78[ChSetupSync] when F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup,
	 * CkeSetup] setups for one DCT are all 0s and at least one of the setups,
	 * F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup, CkeSetup], of the other
	 * controller is 1
	 */
	u32 cha, chb;
	u32 dev = pDCTstat->dev_dct;
	u32 val;

	/* isolate the three setup bits of each channel's address timing */
	cha = pDCTstat->CH_ADDR_TMG[0] & 0x0202020;
	chb = pDCTstat->CH_ADDR_TMG[1] & 0x0202020;

	/* true when exactly one channel has all three setups at zero */
	if ((cha != chb) && ((cha == 0) || (chb == 0))) {
		val = Get_NB32(dev, 0x78);
		/* NOTE(review): other F2x78 bits in this file are set via
		 * (1 << bit); confirm ChSetupSync is defined as a mask
		 * rather than a bit number. */
		val |= ChSetupSync;
		Set_NB32(dev, 0x78, val);
	}
}
3888
3889static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct) {
3890
3891 u32 val;
3892 u32 reg_off = 0x100 * dct;
3893 u32 dev = pDCTstat->dev_dct;
3894
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00003895 if (pDCTstat->LogicalCPUID & (AMD_DR_B2 | AMD_DR_B3)) {
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003896 mct_Wait(10000); /* Wait 50 us*/
Marc Jones8ae8c882007-12-19 01:32:08 +00003897 val = Get_NB32(dev, 0x110);
3898 if ( val & (1 << DramEnabled)) {
3899 /* If 50 us expires while DramEnable =0 then do the following */
3900 val = Get_NB32(dev, 0x90 + reg_off);
3901 val &= ~(1 << Width128); /* Program Width128 = 0 */
3902 Set_NB32(dev, 0x90 + reg_off, val);
3903
3904 val = Get_NB32_index_wait(dev, 0x98 + reg_off, 0x05); /* Perform dummy CSR read to F2x09C_x05 */
3905
3906 if (pDCTstat->GangedMode) {
3907 val = Get_NB32(dev, 0x90 + reg_off);
3908 val |= 1 << Width128; /* Program Width128 = 0 */
3909 Set_NB32(dev, 0x90 + reg_off, val);
3910 }
3911 }
3912 }
3913}
3914
3915
3916/* ==========================================================
3917 * 6-bit Bank Addressing Table
3918 * RR=rows-13 binary
3919 * B=Banks-2 binary
3920 * CCC=Columns-9 binary
3921 * ==========================================================
3922 * DCT CCCBRR Rows Banks Columns 64-bit CS Size
3923 * Encoding
3924 * 0000 000000 13 2 9 128MB
3925 * 0001 001000 13 2 10 256MB
3926 * 0010 001001 14 2 10 512MB
3927 * 0011 010000 13 2 11 512MB
3928 * 0100 001100 13 3 10 512MB
3929 * 0101 001101 14 3 10 1GB
3930 * 0110 010001 14 2 11 1GB
3931 * 0111 001110 15 3 10 2GB
3932 * 1000 010101 14 3 11 2GB
3933 * 1001 010110 15 3 11 4GB
3934 * 1010 001111 16 3 10 4GB
3935 * 1011 010111 16 3 11 8GB
3936 */