nb/intel/sandybridge: Tidy up code and comments

- Reformat some lines of code
- Move MCHBAR registers and documentation into a separate file
- Add a few missing macros
- Rename some registers
- Rewrite several comments
- Use C-style comments for consistency
- Rewrite some hex constants
- Use HOST_BRIDGE instead of PCI_DEV(0, 0, 0)

With BUILD_TIMELESS=1, this commit does not change the result of:
- Asus P8Z77-V LX2 with native raminit.
- Asus P8Z77-M PRO with MRC raminit.

Change-Id: I6e113e48afd685ca63cfcb11ff9fcf9df6e41e46
Signed-off-by: Angel Pons <th3fanbus@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/39599
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
Reviewed-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
diff --git a/src/northbridge/intel/sandybridge/acpi.c b/src/northbridge/intel/sandybridge/acpi.c
index 074e941..d043425 100644
--- a/src/northbridge/intel/sandybridge/acpi.c
+++ b/src/northbridge/intel/sandybridge/acpi.c
@@ -33,38 +33,39 @@
 	if (!dev)
 		return current;
 
-	pciexbar_reg=pci_read_config32(dev, PCIEXBAR);
+	pciexbar_reg = pci_read_config32(dev, PCIEXBAR);
 
-	// MMCFG not supported or not enabled.
+	/* MMCFG not supported or not enabled */
 	if (!(pciexbar_reg & (1 << 0)))
 		return current;
 
 	switch ((pciexbar_reg >> 1) & 3) {
-	case 0: // 256MB
-		pciexbar = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28));
+	case 0: /* 256MB */
+		pciexbar = pciexbar_reg & (0xffffffffULL << 28);
 		max_buses = 256;
 		break;
-	case 1: // 128M
-		pciexbar = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
+	case 1: /* 128M */
+		pciexbar = pciexbar_reg & (0xffffffffULL << 27);
 		max_buses = 128;
 		break;
-	case 2: // 64M
-		pciexbar = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28)|(1 << 27)|(1 << 26));
+	case 2: /* 64M */
+		pciexbar = pciexbar_reg & (0xffffffffULL << 26);
 		max_buses = 64;
 		break;
-	default: // RSVD
+	default: /* RSVD */
 		return current;
 	}
 
 	if (!pciexbar)
 		return current;
 
-	current += acpi_create_mcfg_mmconfig((acpi_mcfg_mmconfig_t *) current,
-			pciexbar, 0x0, 0x0, max_buses - 1);
+	current += acpi_create_mcfg_mmconfig((acpi_mcfg_mmconfig_t *) current, pciexbar, 0, 0,
+			max_buses - 1);
 
 	return current;
 }
 
+
 static unsigned long acpi_create_igfx_rmrr(const unsigned long current)
 {
 	const u32 base_mask = ~(u32)(MiB - 1);
@@ -73,7 +74,7 @@
 	if (!host)
 		return 0;
 
-	const u32 bgsm = pci_read_config32(host, BGSM) & base_mask;
+	const u32 bgsm  = pci_read_config32(host,  BGSM) & base_mask;
 	const u32 tolud = pci_read_config32(host, TOLUD) & base_mask;
 	if (!bgsm || !tolud)
 		return 0;
@@ -89,7 +90,7 @@
 		unsigned long tmp;
 
 		tmp = current;
-		current += acpi_create_dmar_drhd(current, 0, 0, IOMMU_BASE1);
+		current += acpi_create_dmar_drhd(current, 0, 0, GFXVT_BASE);
 		current += acpi_create_dmar_ds_pci(current, 0, 2, 0);
 		current += acpi_create_dmar_ds_pci(current, 0, 2, 1);
 		acpi_dmar_drhd_fixup(tmp, current);
@@ -104,34 +105,37 @@
 	}
 
 	const unsigned long tmp = current;
-	current += acpi_create_dmar_drhd(current,
-			DRHD_INCLUDE_PCI_ALL, 0, IOMMU_BASE2);
-	current += acpi_create_dmar_ds_ioapic(current,
-			2, PCH_IOAPIC_PCI_BUS, PCH_IOAPIC_PCI_SLOT, 0);
+	current += acpi_create_dmar_drhd(current, DRHD_INCLUDE_PCI_ALL, 0, VTVC0_BASE);
+
+	current += acpi_create_dmar_ds_ioapic(current, 2, PCH_IOAPIC_PCI_BUS,
+			PCH_IOAPIC_PCI_SLOT, 0);
+
 	size_t i;
 	for (i = 0; i < 8; ++i)
-		current += acpi_create_dmar_ds_msi_hpet(current,
-				0, PCH_HPET_PCI_BUS, PCH_HPET_PCI_SLOT, i);
+		current += acpi_create_dmar_ds_msi_hpet(current, 0, PCH_HPET_PCI_BUS,
+				PCH_HPET_PCI_SLOT, i);
+
 	acpi_dmar_drhd_fixup(tmp, current);
 
 	return current;
 }
 
-unsigned long northbridge_write_acpi_tables(struct device *const dev,
-					    unsigned long current,
+unsigned long northbridge_write_acpi_tables(struct device *const dev, unsigned long current,
 					    struct acpi_rsdp *const rsdp)
 {
-	const u32 capid0_a = pci_read_config32(dev, 0xe4);
+	const u32 capid0_a = pci_read_config32(dev, CAPID0_A);
 	if (capid0_a & (1 << 23))
 		return current;
 
 	printk(BIOS_DEBUG, "ACPI:     * DMAR\n");
+
 	acpi_dmar_t *const dmar = (acpi_dmar_t *)current;
+
 	acpi_create_dmar(dmar, DMAR_INTR_REMAP, acpi_fill_dmar);
 	current += dmar->header.length;
 	current = acpi_align_current(current);
-	acpi_add_table(rsdp, dmar);
 
+	acpi_add_table(rsdp, dmar);
 	current = acpi_align_current(current);
 
 	printk(BIOS_DEBUG, "current = %lx\n", current);
diff --git a/src/northbridge/intel/sandybridge/bootblock.c b/src/northbridge/intel/sandybridge/bootblock.c
index b6ba395..9dfeed6 100644
--- a/src/northbridge/intel/sandybridge/bootblock.c
+++ b/src/northbridge/intel/sandybridge/bootblock.c
@@ -20,19 +20,17 @@
 	uint32_t reg;
 
 	/*
-	 * The "io" variant of the config access is explicitly used to
-	 * setup the PCIEXBAR because CONFIG_MMCONF_SUPPORT is set to
-	 * to true. That way all subsequent non-explicit config accesses use
-	 * MCFG. This code also assumes that bootblock_northbridge_init() is
-	 * the first thing called in the non-asm boot block code. The final
-	 * assumption is that no assembly code is using the
+	 * The "io" variant of the config access is explicitly used to setup the
+	 * PCIEXBAR because CONFIG_MMCONF_SUPPORT is set to true. That way, all
+	 * subsequent non-explicit config accesses use MCFG. This code also assumes
+	 * that bootblock_northbridge_init() is the first thing called in the non-asm
+	 * boot block code. The final assumption is that no assembly code is using the
 	 * CONFIG_MMCONF_SUPPORT option to do PCI config accesses.
 	 *
-	 * The PCIEXBAR is assumed to live in the memory mapped IO space under
-	 * 4GiB.
+	 * The PCIEXBAR is assumed to live in the memory mapped IO space under 4GiB.
 	 */
 	reg = 0;
-	pci_io_write_config32(PCI_DEV(0,0,0), PCIEXBAR + 4, reg);
+	pci_io_write_config32(HOST_BRIDGE, PCIEXBAR + 4, reg);
 	reg = CONFIG_MMCONF_BASE_ADDRESS | 4 | 1; /* 64MiB - 0-63 buses. */
-	pci_io_write_config32(PCI_DEV(0,0,0), PCIEXBAR, reg);
+	pci_io_write_config32(HOST_BRIDGE, PCIEXBAR, reg);
 }
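
The value written above, CONFIG_MMCONF_BASE_ADDRESS | 4 | 1, sets the PCIEXBAR length field
(bits [2:1]) to 2 for a 64MiB window covering buses 0-63, plus the enable bit, matching the
decoding done in acpi.c and northbridge.c. A minimal sketch of that encoding (the helper is
illustrative only, not part of this change):

	/* Length field in bits [2:1]: 0 = 256MiB/256 buses, 1 = 128MiB/128, 2 = 64MiB/64 */
	static uint32_t pciexbar_encode(uintptr_t base, unsigned int buses)
	{
		const uint32_t length = (buses >= 256) ? 0 : (buses >= 128) ? 1 : 2;

		return (uint32_t)base | (length << 1) | (1 << 0); /* Bit 0 enables MMCONF */
	}
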
diff --git a/src/northbridge/intel/sandybridge/chip.h b/src/northbridge/intel/sandybridge/chip.h
index 5f5bf31..8318156 100644
--- a/src/northbridge/intel/sandybridge/chip.h
+++ b/src/northbridge/intel/sandybridge/chip.h
@@ -19,9 +19,9 @@
 
 /*
  * Digital Port Hotplug Enable:
- *  0x04 = Enabled, 2ms short pulse
+ *  0x04 = Enabled, 2ms   short pulse
  *  0x05 = Enabled, 4.5ms short pulse
- *  0x06 = Enabled, 6ms short pulse
+ *  0x06 = Enabled, 6ms   short pulse
  *  0x07 = Enabled, 100ms short pulse
  */
 struct northbridge_intel_sandybridge_config {
@@ -48,7 +48,7 @@
 	struct i915_gpu_controller_info gfx;
 
 	/*
-	 * Maximum PCI mmio size in MiB.
+	 * Maximum PCI MMIO size in MiB.
 	 */
 	u16 pci_mmio_size;
 
@@ -63,7 +63,8 @@
 	bool ec_present;
 	bool ddr3lv_support;
 
-	/* N mode functionality. Leave this setting at 0.
+	/*
+	 * N mode functionality. Leave this setting at 0.
 	 * 0 Auto
 	 * 1 1N
 	 * 2 2N
@@ -74,12 +75,13 @@
 		DDR_NMODE_2N,
 	} nmode;
 
-	/* DDR refresh rate config. JEDEC Standard No.21-C Annex K allows
-	 * for DIMM SPD data to specify whether double-rate is required for
-	 * extended operating temperature range.
-	 * 0 Enable double rate based upon temperature thresholds
-	 * 1 Normal rate
-	 * 2 Always enable double rate
+	/*
+	 * DDR refresh rate config. JEDEC Standard No.21-C Annex K allows for DIMM SPD data to
+	 * specify whether double-rate is required for extended operating temperature range.
+	 *
+	 *   0 Enable double rate based upon temperature thresholds
+	 *   1 Normal rate
+	 *   2 Always enable double rate
 	 */
 	enum {
 		DDR_REFRESH_RATE_TEMP_THRES = 0,
@@ -93,7 +95,7 @@
 	 *  [1] = overcurrent pin
 	 *  [2] = length
 	 *
-	 * Ports 0-7 can be mapped to OC0-OC3
+	 * Ports 0-7  can be mapped to OC0-OC3
 	 * Ports 8-13 can be mapped to OC4-OC7
 	 *
 	 * Port Length
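
For reference, the digital port hotplug fields documented above are packed into one register
by gma_func0_init() later in this patch; a sketch of that packing, using the values from the
table at the top of chip.h (the helper itself is illustrative, not part of this change):

	/* 0x04 = 2ms, 0x05 = 4.5ms, 0x06 = 6ms, 0x07 = 100ms short pulse */
	static u32 digital_port_hotplug(const struct northbridge_intel_sandybridge_config *conf)
	{
		return ((conf->gpu_dp_b_hotplug & 7) <<  2) |
		       ((conf->gpu_dp_c_hotplug & 7) << 10) |
		       ((conf->gpu_dp_d_hotplug & 7) << 18);
	}
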
diff --git a/src/northbridge/intel/sandybridge/early_init.c b/src/northbridge/intel/sandybridge/early_init.c
index 10ac071..390fadc 100644
--- a/src/northbridge/intel/sandybridge/early_init.c
+++ b/src/northbridge/intel/sandybridge/early_init.c
@@ -25,49 +25,49 @@
 
 static void systemagent_vtd_init(void)
 {
-	const u32 capid0_a = pci_read_config32(PCI_DEV(0, 0, 0), CAPID0_A);
+	const u32 capid0_a = pci_read_config32(HOST_BRIDGE, CAPID0_A);
 	if (capid0_a & (1 << 23))
 		return;
 
-	/* setup BARs */
-	MCHBAR32(VTD1_BASE + 4) = IOMMU_BASE1 >> 32;
-	MCHBAR32(VTD1_BASE)     = IOMMU_BASE1 | 1;
-	MCHBAR32(VTD2_BASE + 4) = IOMMU_BASE2 >> 32;
-	MCHBAR32(VTD2_BASE)     = IOMMU_BASE2 | 1;
+	/* Setup BARs */
+	MCHBAR32(GFXVTBAR + 4) = GFXVT_BASE >> 32;
+	MCHBAR32(GFXVTBAR)     = GFXVT_BASE | 1;
+	MCHBAR32(VTVC0BAR + 4) = VTVC0_BASE >> 32;
+	MCHBAR32(VTVC0BAR)     = VTVC0_BASE | 1;
 
-	/* lock policies */
-	write32((void *)(IOMMU_BASE1 + 0xff0), 0x80000000);
+	/* Lock policies */
+	write32((void *)(GFXVT_BASE + 0xff0), 0x80000000);
 
 	const struct device *const azalia = pcidev_on_root(0x1b, 0);
 	if (azalia && azalia->enabled) {
-		write32((void *)(IOMMU_BASE2 + 0xff0), 0x20000000);
-		write32((void *)(IOMMU_BASE2 + 0xff0), 0xa0000000);
+		write32((void *)(VTVC0_BASE + 0xff0), 0x20000000);
+		write32((void *)(VTVC0_BASE + 0xff0), 0xa0000000);
 	} else {
-		write32((void *)(IOMMU_BASE2 + 0xff0), 0x80000000);
+		write32((void *)(VTVC0_BASE + 0xff0), 0x80000000);
 	}
 }
 
 static void enable_pam_region(void)
 {
-	pci_write_config8(PCI_DEV(0, 0x00, 0), PAM0, 0x30);
-	pci_write_config8(PCI_DEV(0, 0x00, 0), PAM1, 0x33);
-	pci_write_config8(PCI_DEV(0, 0x00, 0), PAM2, 0x33);
-	pci_write_config8(PCI_DEV(0, 0x00, 0), PAM3, 0x33);
-	pci_write_config8(PCI_DEV(0, 0x00, 0), PAM4, 0x33);
-	pci_write_config8(PCI_DEV(0, 0x00, 0), PAM5, 0x33);
-	pci_write_config8(PCI_DEV(0, 0x00, 0), PAM6, 0x33);
+	pci_write_config8(HOST_BRIDGE, PAM0, 0x30);
+	pci_write_config8(HOST_BRIDGE, PAM1, 0x33);
+	pci_write_config8(HOST_BRIDGE, PAM2, 0x33);
+	pci_write_config8(HOST_BRIDGE, PAM3, 0x33);
+	pci_write_config8(HOST_BRIDGE, PAM4, 0x33);
+	pci_write_config8(HOST_BRIDGE, PAM5, 0x33);
+	pci_write_config8(HOST_BRIDGE, PAM6, 0x33);
 }
 
 static void sandybridge_setup_bars(void)
 {
 	printk(BIOS_DEBUG, "Setting up static northbridge registers...");
 	/* Set up all hardcoded northbridge BARs */
-	pci_write_config32(PCI_DEV(0, 0x00, 0), EPBAR, DEFAULT_EPBAR | 1);
-	pci_write_config32(PCI_DEV(0, 0x00, 0), EPBAR + 4, (0LL+DEFAULT_EPBAR) >> 32);
-	pci_write_config32(PCI_DEV(0, 0x00, 0), MCHBAR, (uintptr_t)DEFAULT_MCHBAR | 1);
-	pci_write_config32(PCI_DEV(0, 0x00, 0), MCHBAR + 4, (0LL+(uintptr_t)DEFAULT_MCHBAR) >> 32);
-	pci_write_config32(PCI_DEV(0, 0x00, 0), DMIBAR, (uintptr_t)DEFAULT_DMIBAR | 1);
-	pci_write_config32(PCI_DEV(0, 0x00, 0), DMIBAR + 4, (0LL+(uintptr_t)DEFAULT_DMIBAR) >> 32);
+	pci_write_config32(HOST_BRIDGE, EPBAR,  DEFAULT_EPBAR | 1);
+	pci_write_config32(HOST_BRIDGE, EPBAR  + 4, (0LL + DEFAULT_EPBAR) >> 32);
+	pci_write_config32(HOST_BRIDGE, MCHBAR, (uintptr_t)DEFAULT_MCHBAR | 1);
+	pci_write_config32(HOST_BRIDGE, MCHBAR + 4, (0LL + (uintptr_t)DEFAULT_MCHBAR) >> 32);
+	pci_write_config32(HOST_BRIDGE, DMIBAR, (uintptr_t)DEFAULT_DMIBAR | 1);
+	pci_write_config32(HOST_BRIDGE, DMIBAR + 4, (0LL + (uintptr_t)DEFAULT_DMIBAR) >> 32);
 
 	printk(BIOS_DEBUG, " done\n");
 }
@@ -76,10 +76,9 @@
 {
 	u32 reg32;
 	u16 reg16;
-	u8 reg8;
-	u8 gfxsize;
+	u8 reg8, gfxsize;
 
-	reg16 = pci_read_config16(PCI_DEV(0,2,0), PCI_DEVICE_ID);
+	reg16 = pci_read_config16(PCI_DEV(0, 2, 0), PCI_DEVICE_ID);
 	switch (reg16) {
 	case 0x0102: /* GT1 Desktop */
 	case 0x0106: /* GT1 Mobile */
@@ -105,7 +104,7 @@
 		/* Setup IGD memory by setting GGC[7:3] = 1 for 32MB */
 		gfxsize = 0;
 	}
-	reg16 = pci_read_config16(PCI_DEV(0,0,0), GGC);
+	reg16 = pci_read_config16(HOST_BRIDGE, GGC);
 	reg16 &= ~0x00f8;
 	reg16 |= (gfxsize + 1) << 3;
 	/* Program GTT memory by setting GGC[9:8] = 2MB */
@@ -113,7 +112,7 @@
 	reg16 |= 2 << 8;
 	/* Enable VGA decode */
 	reg16 &= ~0x0002;
-	pci_write_config16(PCI_DEV(0,0,0), GGC, reg16);
+	pci_write_config16(HOST_BRIDGE, GGC, reg16);
 
 	/* Enable 256MB aperture */
 	reg8 = pci_read_config8(PCI_DEV(0, 2, 0), MSAC);
@@ -123,7 +122,7 @@
 
 	/* Erratum workarounds */
 	reg32 = MCHBAR32(SAPMCTL);
-	reg32 |= (1 << 9)|(1 << 10);
+	reg32 |= (1 << 9) | (1 << 10);
 	MCHBAR32(SAPMCTL) = reg32;
 
 	/* Enable SA Clock Gating */
@@ -131,52 +130,56 @@
 	MCHBAR32(SAPMCTL) = reg32 | 1;
 
 	/* GPU RC6 workaround for sighting 366252 */
-	reg32 = MCHBAR32(0x5d14);
+	reg32 = MCHBAR32(SSKPD_HI);
 	reg32 |= (1 << 31);
-	MCHBAR32(0x5d14) = reg32;
+	MCHBAR32(SSKPD_HI) = reg32;
 
-	/* VLW */
+	/* VLW (Virtual Legacy Wire?) */
 	reg32 = MCHBAR32(0x6120);
 	reg32 &= ~(1 << 0);
 	MCHBAR32(0x6120) = reg32;
 
-	reg32 = MCHBAR32(PAIR_CTL);
+	reg32 = MCHBAR32(INTRDIRCTL);
 	reg32 |= (1 << 4) | (1 << 5);
-	MCHBAR32(PAIR_CTL) = reg32;
+	MCHBAR32(INTRDIRCTL) = reg32;
 }
 
 static void start_peg_link_training(void)
 {
-	u32 tmp;
-	u32 deven;
+	u32 tmp, deven;
 
-	/* PEG on IvyBridge+ needs a special startup sequence.
-	 * As the MRC has its own initialization code skip it. */
-	if (((pci_read_config16(PCI_DEV(0, 0, 0), PCI_DEVICE_ID) &
-			BASE_REV_MASK) != BASE_REV_IVB) ||
-		CONFIG(HAVE_MRC))
+	const u16 base_rev = pci_read_config16(HOST_BRIDGE, PCI_DEVICE_ID) & BASE_REV_MASK;
+	/*
+	 * PEG on IvyBridge+ needs a special startup sequence.
+	 * As the MRC has its own initialization code, skip it.
+	 */
+	if ((base_rev != BASE_REV_IVB) || CONFIG(HAVE_MRC))
 		return;
 
-	deven = pci_read_config32(PCI_DEV(0, 0, 0), DEVEN);
+	deven = pci_read_config32(HOST_BRIDGE, DEVEN);
 
+	/*
+	 * For each PEG device, set bit 5 to use three retries for OC (Offset Calibration).
+	 * We also clear DEFER_OC (bit 16) in order to start PEG training.
+	 */
 	if (deven & DEVEN_PEG10) {
-		tmp = pci_read_config32(PCI_DEV(0, 1, 0), 0xC24) & ~(1 << 16);
-		pci_write_config32(PCI_DEV(0, 1, 0), 0xC24, tmp | (1 << 5));
+		tmp = pci_read_config32(PCI_DEV(0, 1, 0), AFE_PWRON) & ~(1 << 16);
+		pci_write_config32(PCI_DEV(0, 1, 0), AFE_PWRON, tmp | (1 << 5));
 	}
 
 	if (deven & DEVEN_PEG11) {
-		tmp = pci_read_config32(PCI_DEV(0, 1, 1), 0xC24) & ~(1 << 16);
-		pci_write_config32(PCI_DEV(0, 1, 1), 0xC24, tmp | (1 << 5));
+		tmp = pci_read_config32(PCI_DEV(0, 1, 1), AFE_PWRON) & ~(1 << 16);
+		pci_write_config32(PCI_DEV(0, 1, 1), AFE_PWRON, tmp | (1 << 5));
 	}
 
 	if (deven & DEVEN_PEG12) {
-		tmp = pci_read_config32(PCI_DEV(0, 1, 2), 0xC24) & ~(1 << 16);
-		pci_write_config32(PCI_DEV(0, 1, 2), 0xC24, tmp | (1 << 5));
+		tmp = pci_read_config32(PCI_DEV(0, 1, 2), AFE_PWRON) & ~(1 << 16);
+		pci_write_config32(PCI_DEV(0, 1, 2), AFE_PWRON, tmp | (1 << 5));
 	}
 
 	if (deven & DEVEN_PEG60) {
-		tmp = pci_read_config32(PCI_DEV(0, 6, 0), 0xC24) & ~(1 << 16);
-		pci_write_config32(PCI_DEV(0, 6, 0), 0xC24, tmp | (1 << 5));
+		tmp = pci_read_config32(PCI_DEV(0, 6, 0), AFE_PWRON) & ~(1 << 16);
+		pci_write_config32(PCI_DEV(0, 6, 0), AFE_PWRON, tmp | (1 << 5));
 	}
 }
 
@@ -187,17 +190,17 @@
 	u8 reg8;
 
 	/* Device ID Override Enable should be done very early */
-	capid0_a = pci_read_config32(PCI_DEV(0, 0, 0), 0xe4);
+	capid0_a = pci_read_config32(HOST_BRIDGE, CAPID0_A);
 	if (capid0_a & (1 << 10)) {
 		const size_t is_mobile = get_platform_type() == PLATFORM_MOBILE;
 
-		reg8 = pci_read_config8(PCI_DEV(0, 0, 0), 0xf3);
+		reg8 = pci_read_config8(HOST_BRIDGE, DIDOR);
 		reg8 &= ~7; /* Clear 2:0 */
 
 		if (is_mobile)
 			reg8 |= 1; /* Set bit 0 */
 
-		pci_write_config8(PCI_DEV(0, 0, 0), 0xf3, reg8);
+		pci_write_config8(HOST_BRIDGE, DIDOR, reg8);
 	}
 
 	/* Setup all BARs required for early PCIe and raminit */
@@ -210,24 +213,25 @@
 	systemagent_vtd_init();
 
 	/* Device Enable, don't touch PEG bits */
-	deven = pci_read_config32(PCI_DEV(0, 0, 0), DEVEN) | DEVEN_IGD;
-	pci_write_config32(PCI_DEV(0, 0, 0), DEVEN, deven);
+	deven = pci_read_config32(HOST_BRIDGE, DEVEN) | DEVEN_IGD;
+	pci_write_config32(HOST_BRIDGE, DEVEN, deven);
 
 	sandybridge_setup_graphics();
 
-	/* Write magic value to start PEG link training.
-	 * This should be done in PCI device enumeration, but
-	 * the PCIe specification requires to wait at least 100msec
-	 * after reset for devices to come up.
-	 * As we don't want to increase boot time, enable it early and
-	 * assume the PEG is up as soon as PCI enumeration starts.
-	 * TODO: use time stamps to ensure the timings are met */
+	/*
+	 * Write magic values to start PEG link training. This should be done in PCI device
+	 * enumeration, but the PCIe specification requires waiting at least 100 msec after
+	 * reset for devices to come up. As we don't want to increase boot time, enable it
+	 * early and assume that PEG is up as soon as PCI enumeration starts.
+	 *
+	 * TODO: use timestamps to ensure the timings are met.
+	 */
 	start_peg_link_training();
 }
 
 void northbridge_romstage_finalize(int s3resume)
 {
-	MCHBAR16(SSKPD) = 0xCAFE;
+	MCHBAR16(SSKPD_HI) = 0xCAFE;
 
 	romstage_handoff_init(s3resume);
 }
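
The four DEVEN_PEG* branches in start_peg_link_training() above apply the same two-bit update
to AFE_PWRON (clear DEFER_OC at bit 16, set bit 5 for three offset-calibration retries). A
possible helper capturing that pattern, shown only as a sketch (not part of this change):

	static void peg_enable_training(pci_devfn_t peg)
	{
		u32 afe = pci_read_config32(peg, AFE_PWRON);

		afe &= ~(1 << 16);	/* Clear DEFER_OC so link training can start */
		afe |=  (1 <<  5);	/* Three retries for offset calibration */
		pci_write_config32(peg, AFE_PWRON, afe);
	}

	/* e.g. if (deven & DEVEN_PEG10) peg_enable_training(PCI_DEV(0, 1, 0)); */
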
diff --git a/src/northbridge/intel/sandybridge/finalize.c b/src/northbridge/intel/sandybridge/finalize.c
index 6a3156e..ab2a21c 100644
--- a/src/northbridge/intel/sandybridge/finalize.c
+++ b/src/northbridge/intel/sandybridge/finalize.c
@@ -16,36 +16,34 @@
 #include <device/pci_ops.h>
 #include "sandybridge.h"
 
-#define PCI_DEV_SNB PCI_DEV(0, 0, 0)
-
 void intel_sandybridge_finalize_smm(void)
 {
-	pci_or_config16(PCI_DEV_SNB, GGC, 1 << 0);
-	pci_or_config16(PCI_DEV_SNB, PAVPC, 1 << 2);
-	pci_or_config32(PCI_DEV_SNB, DPR, 1 << 0);
-	pci_or_config32(PCI_DEV_SNB, MESEG_MASK, MELCK);
-	pci_or_config32(PCI_DEV_SNB, REMAPBASE, 1 << 0);
-	pci_or_config32(PCI_DEV_SNB, REMAPLIMIT, 1 << 0);
-	pci_or_config32(PCI_DEV_SNB, TOM, 1 << 0);
-	pci_or_config32(PCI_DEV_SNB, TOUUD, 1 << 0);
-	pci_or_config32(PCI_DEV_SNB, BDSM, 1 << 0);
-	pci_or_config32(PCI_DEV_SNB, BGSM, 1 << 0);
-	pci_or_config32(PCI_DEV_SNB, TSEGMB, 1 << 0);
-	pci_or_config32(PCI_DEV_SNB, TOLUD, 1 << 0);
+	pci_or_config16(HOST_BRIDGE, GGC,    1 << 0);
+	pci_or_config16(HOST_BRIDGE, PAVPC,  1 << 2);
+	pci_or_config32(HOST_BRIDGE, DPR,    1 << 0);
+	pci_or_config32(HOST_BRIDGE, MESEG_MASK, MELCK);
+	pci_or_config32(HOST_BRIDGE, REMAPBASE,  1 << 0);
+	pci_or_config32(HOST_BRIDGE, REMAPLIMIT, 1 << 0);
+	pci_or_config32(HOST_BRIDGE, TOM,    1 << 0);
+	pci_or_config32(HOST_BRIDGE, TOUUD,  1 << 0);
+	pci_or_config32(HOST_BRIDGE, BDSM,   1 << 0);
+	pci_or_config32(HOST_BRIDGE, BGSM,   1 << 0);
+	pci_or_config32(HOST_BRIDGE, TSEGMB, 1 << 0);
+	pci_or_config32(HOST_BRIDGE, TOLUD,  1 << 0);
 
-	MCHBAR32_OR(MMIO_PAVP_CTL, 1 << 0);	/* PAVP */
-	MCHBAR32_OR(SAPMCTL, 1 << 31);		/* SA PM */
-	MCHBAR32_OR(0x6020, 1 << 0);		/* UMA GFX */
-	MCHBAR32_OR(0x63fc, 1 << 0);		/* VTDTRK */
-	MCHBAR32_OR(0x6800, 1 << 31);
-	MCHBAR32_OR(0x7000, 1 << 31);
-	MCHBAR32_OR(0x77fc, 1 << 0);
+	MCHBAR32_OR(PAVP_MSG,  1 <<  0);	/* PAVP */
+	MCHBAR32_OR(SAPMCTL,   1 << 31);	/* SA PM */
+	MCHBAR32_OR(UMAGFXCTL, 1 <<  0);	/* UMA GFX */
+	MCHBAR32_OR(VTDTRKLCK, 1 <<  0);	/* VTDTRK */
+	MCHBAR32_OR(REQLIM,    1 << 31);
+	MCHBAR32_OR(DMIVCLIM,  1 << 31);
+	MCHBAR32_OR(CRDTLCK,   1 <<  0);
 
 	/* Memory Controller Lockdown */
 	MCHBAR8(MC_LOCK) = 0x8f;
 
 	/* Read+write the following */
-	MCHBAR32(0x6030) = MCHBAR32(0x6030);
-	MCHBAR32(0x6034) = MCHBAR32(0x6034);
-	MCHBAR32(0x6008) = MCHBAR32(0x6008);
+	MCHBAR32(VDMBDFBARKVM)  = MCHBAR32(VDMBDFBARKVM);
+	MCHBAR32(VDMBDFBARPAVP) = MCHBAR32(VDMBDFBARPAVP);
+	MCHBAR32(HDAUDRID)      = MCHBAR32(HDAUDRID);
 }
diff --git a/src/northbridge/intel/sandybridge/gma.c b/src/northbridge/intel/sandybridge/gma.c
index e6dfbc4..f0232e0 100644
--- a/src/northbridge/intel/sandybridge/gma.c
+++ b/src/northbridge/intel/sandybridge/gma.c
@@ -57,7 +57,7 @@
 	{ 0xa240, 0x00000000 },
 	{ 0xa244, 0x00000000 },
 	{ 0xa248, 0x8000421e },
-	{ 0 }
+	{ 0 },
 };
 
 static const struct gt_powermeter snb_pm_gt2[] = {
@@ -80,7 +80,7 @@
 	{ 0xa240, 0x00000000 },
 	{ 0xa244, 0x00000000 },
 	{ 0xa248, 0x8000421e },
-	{ 0 }
+	{ 0 },
 };
 
 static const struct gt_powermeter ivb_pm_gt1[] = {
@@ -136,7 +136,7 @@
 	{ 0xaa3c, 0x00001c00 },
 	{ 0xaa54, 0x00000004 },
 	{ 0xaa60, 0x00060000 },
-	{ 0 }
+	{ 0 },
 };
 
 static const struct gt_powermeter ivb_pm_gt2_17w[] = {
@@ -192,7 +192,7 @@
 	{ 0xaa3c, 0x00003900 },
 	{ 0xaa54, 0x00000008 },
 	{ 0xaa60, 0x00110000 },
-	{ 0 }
+	{ 0 },
 };
 
 static const struct gt_powermeter ivb_pm_gt2_35w[] = {
@@ -248,12 +248,12 @@
 	{ 0xaa3c, 0x00003900 },
 	{ 0xaa54, 0x00000008 },
 	{ 0xaa60, 0x00110000 },
-	{ 0 }
+	{ 0 },
 };
 
-/* some vga option roms are used for several chipsets but they only have one
- * PCI ID in their header. If we encounter such an option rom, we need to do
- * the mapping ourselves
+/*
+ * Some VGA option ROMs are used for several chipsets, but they only have one PCI ID in their
+ * header. If we encounter such an option ROM, we need to do the mapping ourselves.
  */
 
 u32 map_oprom_vendev(u32 vendev)
@@ -262,17 +262,17 @@
 
 	switch (vendev) {
 	case 0x80860102:		/* SNB GT1 Desktop */
-	case 0x8086010a:		/* SNB GT1 Server */
+	case 0x8086010a:		/* SNB GT1 Server  */
 	case 0x80860112:		/* SNB GT2 Desktop */
-	case 0x80860116:		/* SNB GT2 Mobile */
+	case 0x80860116:		/* SNB GT2 Mobile  */
 	case 0x80860122:		/* SNB GT2 Desktop >=1.3GHz */
-	case 0x80860126:		/* SNB GT2 Mobile >=1.3GHz */
+	case 0x80860126:		/* SNB GT2 Mobile  >=1.3GHz */
 	case 0x80860152:		/* IVB GT1 Desktop */
-	case 0x80860156:		/* IVB GT1 Mobile */
+	case 0x80860156:		/* IVB GT1 Mobile  */
 	case 0x80860162:		/* IVB GT2 Desktop */
-	case 0x80860166:		/* IVB GT2 Mobile */
-	case 0x8086016a:		/* IVB GT2 Server */
-		new_vendev = 0x80860106;/* SNB GT1 Mobile */
+	case 0x80860166:		/* IVB GT2 Mobile  */
+	case 0x8086016a:		/* IVB GT2 Server  */
+		new_vendev = 0x80860106;/* SNB GT1 Mobile  */
 		break;
 	}
 
@@ -385,18 +385,15 @@
 
 			if (tdp <= 17) {
 				/* <=17W ULV */
-				printk(BIOS_DEBUG, "IVB GT2 17W "
-				       "Power Meter Weights\n");
+				printk(BIOS_DEBUG, "IVB GT2 17W Power Meter Weights\n");
 				gtt_write_powermeter(ivb_pm_gt2_17w);
 			} else if ((tdp >= 25) && (tdp <= 35)) {
 				/* 25W-35W */
-				printk(BIOS_DEBUG, "IVB GT2 25W-35W "
-				       "Power Meter Weights\n");
+				printk(BIOS_DEBUG, "IVB GT2 25W-35W Power Meter Weights\n");
 				gtt_write_powermeter(ivb_pm_gt2_35w);
 			} else {
 				/* All others */
-				printk(BIOS_DEBUG, "IVB GT2 35W "
-				       "Power Meter Weights\n");
+				printk(BIOS_DEBUG, "IVB GT2 35W Power Meter Weights\n");
 				gtt_write_powermeter(ivb_pm_gt2_35w);
 			}
 		}
@@ -552,7 +549,7 @@
 	/* Setup Digital Port Hotplug */
 	reg32 = gtt_read(0xc4030);
 	if (!reg32) {
-		reg32 = (conf->gpu_dp_b_hotplug & 0x7) << 2;
+		reg32  = (conf->gpu_dp_b_hotplug & 0x7) <<  2;
 		reg32 |= (conf->gpu_dp_c_hotplug & 0x7) << 10;
 		reg32 |= (conf->gpu_dp_d_hotplug & 0x7) << 18;
 		gtt_write(0xc4030, reg32);
@@ -599,15 +596,15 @@
 {
 	u16 reg16;
 
-	/* clear DMISCI status */
+	/* Clear DMISCI status */
 	reg16 = inw(DEFAULT_PMBASE + TCO1_STS);
 	reg16 &= DMISCI_STS;
 	outw(DEFAULT_PMBASE + TCO1_STS, reg16);
 
-	/* clear acpi tco status */
+	/* Clear ACPI TCO status */
 	outl(DEFAULT_PMBASE + GPE0_STS, TCOSCI_STS);
 
-	/* enable acpi tco scis */
+	/* Enable ACPI TCO SCIs */
 	reg16 = inw(DEFAULT_PMBASE + GPE0_EN);
 	reg16 |= TCOSCI_EN;
 	outw(DEFAULT_PMBASE + GPE0_EN, reg16);
@@ -654,10 +651,9 @@
 	intel_gma_restore_opregion();
 }
 
-const struct i915_gpu_controller_info *
-intel_gma_get_controller_info(void)
+const struct i915_gpu_controller_info *intel_gma_get_controller_info(void)
 {
-	struct device *dev = pcidev_on_root(0x2, 0);
+	struct device *dev = pcidev_on_root(2, 0);
 	if (!dev) {
 		return NULL;
 	}
@@ -675,10 +671,8 @@
 	drivers_intel_gma_displays_ssdt_generate(gfx);
 }
 
-static unsigned long
-gma_write_acpi_tables(struct device *const dev,
-		      unsigned long current,
-		      struct acpi_rsdp *const rsdp)
+static unsigned long gma_write_acpi_tables(struct device *const dev, unsigned long current,
+					   struct acpi_rsdp *const rsdp)
 {
 	igd_opregion_t *opregion = (igd_opregion_t *)current;
 	global_nvs_t *gnvs;
@@ -706,44 +700,46 @@
 	return "GFX0";
 }
 
-/* called by pci set_vga_bridge function */
+/* Called by PCI set_vga_bridge function */
 static void gma_func0_disable(struct device *dev)
 {
 	u16 reg16;
 	struct device *dev_host = pcidev_on_root(0, 0);
 
 	reg16 = pci_read_config16(dev_host, GGC);
-	reg16 |= (1 << 1); /* disable VGA decode */
+	reg16 |= (1 << 1); /* Disable VGA decode */
 	pci_write_config16(dev_host, GGC, reg16);
 
 	dev->enabled = 0;
 }
 
 static struct pci_operations gma_pci_ops = {
-	.set_subsystem    = pci_dev_set_subsystem,
+	.set_subsystem = pci_dev_set_subsystem,
 };
 
 static struct device_operations gma_func0_ops = {
-	.read_resources		= pci_dev_read_resources,
-	.set_resources		= pci_dev_set_resources,
-	.enable_resources	= pci_dev_enable_resources,
+	.read_resources           = pci_dev_read_resources,
+	.set_resources            = pci_dev_set_resources,
+	.enable_resources         = pci_dev_enable_resources,
 	.acpi_fill_ssdt_generator = gma_ssdt,
-	.init			= gma_func0_init,
-	.scan_bus		= 0,
-	.enable			= 0,
-	.disable		= gma_func0_disable,
-	.ops_pci		= &gma_pci_ops,
-	.acpi_name		= gma_acpi_name,
-	.write_acpi_tables	= gma_write_acpi_tables,
+	.init                     = gma_func0_init,
+	.scan_bus                 = NULL,
+	.enable                   = NULL,
+	.disable                  = gma_func0_disable,
+	.ops_pci                  = &gma_pci_ops,
+	.acpi_name                = gma_acpi_name,
+	.write_acpi_tables        = gma_write_acpi_tables,
 };
 
-static const unsigned short pci_device_ids[] = { 0x0102, 0x0106, 0x010a, 0x0112,
-						 0x0116, 0x0122, 0x0126, 0x0156,
-						 0x0166, 0x0162, 0x016a, 0x0152,
-						 0 };
+static const unsigned short pci_device_ids[] = {
+	0x0102, 0x0106, 0x010a, 0x0112,
+	0x0116, 0x0122, 0x0126, 0x0156,
+	0x0166, 0x0162, 0x016a, 0x0152,
+	0
+};
 
 static const struct pci_driver gma __pci_driver = {
-	.ops	 = &gma_func0_ops,
-	.vendor	 = PCI_VENDOR_ID_INTEL,
+	.ops     = &gma_func0_ops,
+	.vendor  = PCI_VENDOR_ID_INTEL,
 	.devices = pci_device_ids,
 };
diff --git a/src/northbridge/intel/sandybridge/gma.h b/src/northbridge/intel/sandybridge/gma.h
index 899edbb..bf04dee 100644
--- a/src/northbridge/intel/sandybridge/gma.h
+++ b/src/northbridge/intel/sandybridge/gma.h
@@ -17,9 +17,10 @@
 
 struct i915_gpu_controller_info;
 
-int i915lightup_sandy(const struct i915_gpu_controller_info *info,
-		u32 physbase, u16 pio, u8 *mmio, u32 lfb);
-int i915lightup_ivy(const struct i915_gpu_controller_info *info,
-		u32 physbase, u16 pio, u8 *mmio, u32 lfb);
+int i915lightup_sandy(const struct i915_gpu_controller_info *info, u32 physbase, u16 pio,
+			u8 *mmio, u32 lfb);
+
+int i915lightup_ivy(const struct i915_gpu_controller_info *info, u32 physbase, u16 pio,
+			u8 *mmio, u32 lfb);
 
 #endif /* NORTHBRIDGE_INTEL_SANDYBRIDGE_GMA_H */
diff --git a/src/northbridge/intel/sandybridge/mchbar_regs.h b/src/northbridge/intel/sandybridge/mchbar_regs.h
new file mode 100644
index 0000000..929392b
--- /dev/null
+++ b/src/northbridge/intel/sandybridge/mchbar_regs.h
@@ -0,0 +1,430 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2007-2008 coresystems GmbH
+ * Copyright (C) 2011 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SANDYBRIDGE_MCHBAR_REGS_H__
+#define __SANDYBRIDGE_MCHBAR_REGS_H__
+
+/*
+ * ### IOSAV command queue notes ###
+ *
+ * Intel provides a command queue of depth four.
+ * Every command is configured by using multiple MCHBAR registers.
+ * On executing the command queue, you have to specify its depth (number of commands).
+ *
+ * The macros for these registers can take some integer parameters, within these bounds:
+ *   channel:   [0..1]
+ *   index:     [0..3]
+ *   lane:      [0..8]
+ *
+ * Note that these ranges are 'closed': both endpoints are included.
+ *
+ *
+ *
+ * ### Register description ###
+ *
+ * IOSAV_n_SP_CMD_ADDR_ch(channel, index)
+ *   Sub-sequence command addresses. Controls the address, bank address and slotrank signals.
+ *
+ *   Bitfields:
+ *   [0..15]    Row / Column Address.
+ *   [16..18]   The result of (10 + [16..18]) is the number of valid row bits.
+ *                  Note: Value 1 is not implemented. Not that it really matters, though.
+ *                        Value 7 is reserved, as the hardware does not support it.
+ *   [20..22]   Bank Address.
+ *   [24..25]   Rank select. Let's call it "ranksel", as it is mentioned later.
+ *
+ * IOSAV_n_ADDR_UPDATE_ch(channel, index)
+ *   How the address shall be updated after executing the sub-sequence command.
+ *
+ *   Bitfields:
+ *   [0]        Increment CAS/RAS by 1.
+ *   [1]        Increment CAS/RAS by 8.
+ *   [2]        Increment bank select by 1.
+ *   [3..4]     Increment rank select by 1, 2 or 3.
+ *   [5..9]     Known as "addr_wrap". Address bits will wrap around the [addr_wrap..0] range.
+ *   [10..11]   LFSR update:
+ *                  00: Do not use the LFSR function.
+ *                  01: Undefined, treat as Reserved.
+ *                  10: Apply LFSR on the [addr_wrap..0] bit range.
+ *                  11: Apply LFSR on the [addr_wrap..3] bit range.
+ *
+ *   [12..15]   Update rate. The number of command runs between address updates. For example:
+ *                  0: Update every command run.
+ *                  1: Update every second command run. That is, half of the command rate.
+ *                  N: Update after N command runs without updates.
+ *
+ *   [16..17]   LFSR behavior on the deselect cycles (when no sub-seq command is issued):
+ *                  0: No change w.r.t. the last issued command.
+ *                  1: LFSR XORs with address & command (excluding CS), but does not update.
+ *                  2: LFSR XORs with address & command (excluding CS), and updates.
+ *
+ * IOSAV_n_SP_CMD_CTRL_ch(channel, index)
+ *   Special command control register. Controls the DRAM command signals.
+ *
+ *   Bitfields:
+ *   [0]        !RAS signal.
+ *   [1]        !CAS signal.
+ *   [2]        !WE  signal.
+ *   [4..7]     CKE, per rank and channel.
+ *   [8..11]    ODT, per rank and channel.
+ *   [12]       Chip Select mode control.
+ *   [13..16]   Chip select, per rank and channel. It works as follows:
+ *
+ *          entity CS_BLOCK is
+ *              port (
+ *                  MODE    : in  std_logic;                -- Mode select at [12]
+ *                  RANKSEL : in  std_logic_vector(0 to 3); -- Decoded "ranksel" value
+ *                  CS_CTL  : in  std_logic_vector(0 to 3); -- Chip select control at [13..16]
+ *                  CS_Q    : out std_logic_vector(0 to 3)  -- CS signals
+ *              );
+ *          end entity CS_BLOCK;
+ *
+ *          architecture RTL of CS_BLOCK is
+ *          begin
+ *              if MODE = '1' then
+ *                  CS_Q <= not RANKSEL and CS_CTL;
+ *              else
+ *                  CS_Q <= CS_CTL;
+ *              end if;
+ *          end architecture RTL;
+ *
+ *   [17]       Auto Precharge. Only valid when using 10 row bits!
+ *
+ * IOSAV_n_SUBSEQ_CTRL_ch(channel, index)
+ *   Sub-sequence parameters. Controls repetitions, delays and data orientation.
+ *
+ *   Bitfields:
+ *   [0..8]     Number of repetitions of the sub-sequence command.
+ *   [10..14]   Gap, number of clock-cycles to wait before sending the next command.
+ *   [16..24]   Number of clock-cycles to idle between sub-sequence commands.
+ *   [26..27]   The direction of the data.
+ *                  00: None, does not handle data
+ *                  01: Read
+ *                  10: Write
+ *                  11: Read & Write
+ *
+ * IOSAV_n_ADDRESS_LFSR_ch(channel, index)
+ *   23-bit LFSR state register. It is written into the LFSR when the sub-sequence is loaded,
+ *   and then read back from the LFSR when the sub-sequence is done.
+ *
+ *   Bitfields:
+ *   [0..22]    LFSR state.
+ *
+ * IOSAV_SEQ_CTL_ch(channel)
+ *   Control the sequence level in IOSAV: number of sub-sequences, iterations, maintenance...
+ *
+ *   Bitfields:
+ *   [0..7]     Number of full sequence executions. When this field becomes non-zero, then the
+ *              sequence starts running immediately. This value is decremented after completing
+ *              a full sequence iteration. When it is zero, the sequence is done. No decrement
+ *              is done if this field is set to 0xff. This is the "infinite repeat" mode, and
+ *              it is manually aborted by clearing this field.
+ *
+ *   [8..16]    Number of wait cycles after each sequence iteration. This wait's purpose is to
+ *              allow performing maintenance in infinite loops. When non-zero, RCOMP, refresh
+ *              and ZQXS operations can take place.
+ *
+ *   [17]       Stop-on-error mode: Whether to stop sequence execution when an error occurs.
+ *   [18..19]   Number of sub-sequences. The programmed value is the index of the last sub-seq.
+ *   [20]       If set, keep refresh disabled until the next sequence execution.
+ *                  DANGER: Refresh must be re-enabled within the (9 * tREFI) period!
+ *
+ *   [22]       If set, sequence execution will not prevent refresh. This cannot be set when
+ *              bit [20] is also set, or was set on the previous sequence. This bit exists so
+ *              that the sequence machine can be used as a timer without affecting the memory.
+ *
+ *   [23]       If set, an output pin is asserted on the first detected error. This output can
+ *              be used as a trigger for an oscilloscope or a logic analyzer, which is handy.
+ *
+ * IOSAV_DATA_CTL_ch(channel)
+ *   Data-related controls in IOSAV mode.
+ *
+ *   Bitfields:
+ *   [0..7]     WDB (Write Data Buffer) pattern length: [0..7] = (length / 8) - 1;
+ *   [8..15]    WDB read pointer. Points at the data used for IOSAV write transactions.
+ *   [16..23]   Comparison pointer. Used to compare data from IOSAV read transactions.
+ *   [24]       If set, increment pointers only when micro-breakpoint is active.
+ *
+ * IOSAV_STATUS_ch(channel)
+ *   State of the IOSAV sequence machine. Should be polled after sending an IOSAV sequence.
+ *
+ *   Bitfields:
+ *   [0]        IDLE:  IOSAV is sleeping.
+ *   [1]        BUSY:  IOSAV is running a sequence.
+ *   [2]        DONE:  IOSAV has completed a sequence.
+ *   [3]        ERROR: IOSAV detected an error and stopped on it, when using Stop-on-error.
+ *   [4]        PANIC: The refresh machine issued a Panic Refresh, and IOSAV was aborted.
+ *   [5]        RCOMP: RComp failure. Unused, consider Reserved.
+ *   [6]        Cleared with a new sequence, and set when done and refresh counter is drained.
+ *
+ */
+
+/* Indexed register helper macros */
+#define Gz(r, z)	((r) + ((z) <<  8))
+#define Ly(r, y)	((r) + ((y) <<  2))
+#define Cx(r, x)	((r) + ((x) << 10))
+#define CxLy(r, x, y)	((r) + ((x) << 10) + ((y) << 2))
+#define GzLy(r, z, y)	((r) + ((z) <<  8) + ((y) << 2))
+
+/* Byte lane training register base addresses */
+#define LANEBASE_B0	0x0000
+#define LANEBASE_B1	0x0200
+#define LANEBASE_B2	0x0400
+#define LANEBASE_B3	0x0600
+#define LANEBASE_ECC	0x0800 /* ECC lane is in the middle of the data lanes */
+#define LANEBASE_B4	0x1000
+#define LANEBASE_B5	0x1200
+#define LANEBASE_B6	0x1400
+#define LANEBASE_B7	0x1600
+
+/* Byte lane register offsets */
+#define GDCRTRAININGRESULT(ch, y)	GzLy(0x0004, ch, y) /* Test results for PI config */
+#define GDCRTRAININGRESULT1(ch)		GDCRTRAININGRESULT(ch, 0) /* 0x0004 */
+#define GDCRTRAININGRESULT2(ch)		GDCRTRAININGRESULT(ch, 1) /* 0x0008 */
+#define GDCRRX(ch, rank)		GzLy(0x10, ch, rank) /* Time setting for lane Rx */
+#define GDCRTX(ch, rank)		GzLy(0x20, ch, rank) /* Time setting for lane Tx */
+
+/* Register definitions */
+#define GDCRCLKRANKSUSED_ch(ch)		Gz(0x0c00, ch) /* Indicates which rank is populated */
+#define GDCRCLKCOMP_ch(ch)		Gz(0x0c04, ch) /* RCOMP result register */
+#define GDCRCKPICODE_ch(ch)		Gz(0x0c14, ch) /* PI coding for DDR CLK pins */
+#define GDCRCKLOGICDELAY_ch(ch)		Gz(0x0c18, ch) /* Logic delay of 1 QCLK in CLK slice */
+#define GDDLLFUSE_ch(ch)		Gz(0x0c20, ch) /* Used for fuse download to the DLLs */
+#define GDCRCLKDEBUGMUXCFG_ch(ch)	Gz(0x0c3c, ch) /* Debug MUX control */
+
+#define GDCRCMDDEBUGMUXCFG_Cz_S(ch)	Gz(0x0e3c, ch) /* Debug MUX control */
+
+#define CRCOMPOFST1_ch(ch)		Gz(0x1810, ch) /* DQ, CTL and CLK Offset values */
+
+#define GDCRTRAININGMOD_ch(ch)		Gz(0x3000, ch) /* Data training mode control */
+#define GDCRTRAININGRESULT1_ch(ch)	Gz(0x3004, ch) /* Training results according to PI */
+#define GDCRTRAININGRESULT2_ch(ch)	Gz(0x3008, ch)
+
+#define GDCRCTLRANKSUSED_ch(ch)		Gz(0x3200, ch) /* Indicates which rank is populated */
+#define GDCRCMDCOMP_ch(ch)		Gz(0x3204, ch) /* COMP values register */
+#define GDCRCMDCTLCOMP_ch(ch)		Gz(0x3208, ch) /* COMP values register */
+#define GDCRCMDPICODING_ch(ch)		Gz(0x320c, ch) /* Command and control PI coding */
+
+#define GDCRTRAININGMOD			0x3400 /* Data training mode control register */
+#define GDCRDATACOMP			0x340c /* COMP values register */
+
+#define CRCOMPOFST2			0x3714 /* CMD DRV, SComp and Static Leg controls */
+
+/* MC per-channel registers */
+#define TC_DBP_ch(ch)			Cx(0x4000, ch) /* Timings: BIN */
+#define TC_RAP_ch(ch)			Cx(0x4004, ch) /* Timings: Regular access */
+#define TC_RWP_ch(ch)			Cx(0x4008, ch) /* Timings: Read / Write */
+#define TC_OTHP_ch(ch)			Cx(0x400c, ch) /* Timings: Other parameters */
+#define SCHED_SECOND_CBIT_ch(ch)	Cx(0x401c, ch) /* More chicken bits */
+#define SCHED_CBIT_ch(ch)		Cx(0x4020, ch) /* Chicken bits in scheduler */
+#define SC_ROUNDT_LAT_ch(ch)		Cx(0x4024, ch) /* Round-trip latency per rank */
+#define SC_IO_LATENCY_ch(ch)		Cx(0x4028, ch) /* IO Latency Configuration */
+#define SCRAMBLING_SEED_1_ch(ch)	Cx(0x4034, ch) /* Scrambling seed 1 */
+#define SCRAMBLING_SEED_2_LO_ch(ch)	Cx(0x4038, ch) /* Scrambling seed 2 low */
+#define SCRAMBLING_SEED_2_HI_ch(ch)	Cx(0x403c, ch) /* Scrambling seed 2 high */
+
+/* IOSAV Bytelane Bit-wise error */
+#define IOSAV_By_BW_SERROR_ch(ch, y)	CxLy(0x4040, ch, y)
+
+/* IOSAV Bytelane Bit-wise compare mask */
+#define IOSAV_By_BW_MASK_ch(ch, y)	CxLy(0x4080, ch, y)
+
+/*
+ * Defines the number of transactions (non-VC1 RD CAS commands) between two priority ticks.
+ * Different counters for transactions that are issued on the ring agents (core or GT) and
+ * transactions issued in the SA.
+ */
+#define SC_PR_CNT_CONFIG_ch(ch)	Cx(0x40a8, ch)
+#define SC_PCIT_ch(ch)		Cx(0x40ac, ch) /* Page-close idle timer setup - 8 bits */
+#define PM_PDWN_CONFIG_ch(ch)	Cx(0x40b0, ch) /* Power-down (CKE-off) operation config */
+#define ECC_INJECT_COUNT_ch(ch)	Cx(0x40b4, ch) /* ECC error injection count */
+#define ECC_DFT_ch(ch)		Cx(0x40b8, ch) /* ECC DFT features (ECC4ANA, error inject) */
+#define SC_WR_ADD_DELAY_ch(ch)	Cx(0x40d0, ch) /* Extra WR delay to overcome WR-flyby issue */
+
+#define IOSAV_By_BW_SERROR_C_ch(ch, y)	CxLy(0x4140, ch, y) /* IOSAV Bytelane Bit-wise error */
+
+/* IOSAV sub-sequence control registers */
+#define IOSAV_n_SP_CMD_ADDR_ch(ch, y)	CxLy(0x4200, ch, y) /* Special command address. */
+#define IOSAV_n_ADDR_UPDATE_ch(ch, y)	CxLy(0x4210, ch, y) /* Address update control */
+#define IOSAV_n_SP_CMD_CTRL_ch(ch, y)	CxLy(0x4220, ch, y) /* Control of command signals */
+#define IOSAV_n_SUBSEQ_CTRL_ch(ch, y)	CxLy(0x4230, ch, y) /* Sub-sequence controls */
+#define IOSAV_n_ADDRESS_LFSR_ch(ch, y)	CxLy(0x4240, ch, y) /* 23-bit LFSR state value */
+
+#define PM_THML_STAT_ch(ch)	Cx(0x4280, ch) /* Thermal status of each rank */
+#define IOSAV_SEQ_CTL_ch(ch)	Cx(0x4284, ch) /* IOSAV sequence level control */
+#define IOSAV_DATA_CTL_ch(ch)	Cx(0x4288, ch) /* Data control in IOSAV mode */
+#define IOSAV_STATUS_ch(ch)	Cx(0x428c, ch) /* State of the IOSAV sequence machine */
+#define TC_ZQCAL_ch(ch)		Cx(0x4290, ch) /* ZQCAL control register */
+#define TC_RFP_ch(ch)		Cx(0x4294, ch) /* Refresh Parameters */
+#define TC_RFTP_ch(ch)		Cx(0x4298, ch) /* Refresh Timing Parameters */
+#define TC_MR2_SHADOW_ch(ch)	Cx(0x429c, ch) /* MR2 shadow - copy of DDR configuration */
+#define MC_INIT_STATE_ch(ch)	Cx(0x42a0, ch) /* IOSAV mode control */
+#define TC_SRFTP_ch(ch)		Cx(0x42a4, ch) /* Self-refresh timing parameters */
+#define IOSAV_ERROR_ch(ch)	Cx(0x42ac, ch) /* Data vector count of the first error */
+#define IOSAV_DC_MASK_ch(ch)	Cx(0x42b0, ch) /* IOSAV data check masking */
+
+#define IOSAV_By_ERROR_COUNT_ch(ch, y)	CxLy(0x4340, ch, y) /* Per-byte 16-bit error count */
+#define IOSAV_G_ERROR_COUNT_ch(ch)	Cx(0x4364, ch) /* Global 16-bit error count */
+
+/** WARNING: Only applies to Ivy Bridge! */
+#define IOSAV_BYTE_SERROR_ch(ch)	Cx(0x4368, ch) /** Byte-Wise Sticky Error */
+#define IOSAV_BYTE_SERROR_C_ch(ch)	Cx(0x436c, ch) /** Byte-Wise Sticky Error Clear */
+
+#define PM_TRML_M_CONFIG_ch(ch)		Cx(0x4380, ch) /* Thermal mode configuration */
+#define PM_CMD_PWR_ch(ch)		Cx(0x4384, ch) /* Power contribution of commands */
+#define PM_BW_LIMIT_CONFIG_ch(ch)	Cx(0x4388, ch) /* Bandwidth throttling on overtemp */
+#define SC_WDBWM_ch(ch)			Cx(0x438c, ch) /* Watermarks and starvation counter */
+
+/* MC Channel Broadcast registers */
+#define TC_DBP			0x4c00 /* Timings: BIN */
+#define TC_RAP			0x4c04 /* Timings: Regular access */
+#define TC_RWP			0x4c08 /* Timings: Read / Write */
+#define TC_OTHP			0x4c0c /* Timings: Other parameters */
+#define SCHED_SECOND_CBIT	0x4c1c /* More chicken bits */
+#define SCHED_CBIT		0x4c20 /* Chicken bits in scheduler */
+#define SC_ROUNDT_LAT		0x4c24 /* Round-trip latency per rank */
+#define SC_IO_LATENCY		0x4c28 /* IO Latency Configuration */
+#define SCRAMBLING_SEED_1	0x4c34 /* Scrambling seed 1 */
+#define SCRAMBLING_SEED_2_LO	0x4c38 /* Scrambling seed 2 low */
+#define SCRAMBLING_SEED_2_HI	0x4c3c /* Scrambling seed 2 high */
+
+#define IOSAV_By_BW_SERROR(y)	Ly(0x4c40, y) /* IOSAV Bytelane Bit-wise error */
+#define IOSAV_By_BW_MASK(y)	Ly(0x4c80, y) /* IOSAV Bytelane Bit-wise compare mask */
+
+/*
+ * Defines the number of transactions (non-VC1 RD CAS commands) between two priority ticks.
+ * Different counters for transactions that are issued on the ring agents (core or GT) and
+ * transactions issued in the SA.
+ */
+#define SC_PR_CNT_CONFIG	0x4ca8
+#define SC_PCIT			0x4cac /* Page-close idle timer setup - 8 bits */
+#define PM_PDWN_CONFIG		0x4cb0 /* Power-down (CKE-off) operation config */
+#define ECC_INJECT_COUNT	0x4cb4 /* ECC error injection count */
+#define ECC_DFT			0x4cb8 /* ECC DFT features (ECC4ANA, error inject) */
+#define SC_WR_ADD_DELAY		0x4cd0 /* Extra WR delay to overcome WR-flyby issue */
+
+/** Opportunistic reads configuration during write-major-mode (WMM) */
+#define WMM_READ_CONFIG		0x4cd4 /** WARNING: Only exists on IVB! */
+
+#define IOSAV_By_BW_SERROR_C(y)	Ly(0x4d40, y) /* IOSAV Bytelane Bit-wise error */
+
+#define IOSAV_n_SP_CMD_ADDR(n)	Ly(0x4e00, n) /* Sub-sequence special command address */
+#define IOSAV_n_ADDR_UPDATE(n)	Ly(0x4e10, n) /* Address update after command execution */
+#define IOSAV_n_SP_CMD_CTRL(n)	Ly(0x4e20, n) /* Command signals in sub-sequence command */
+#define IOSAV_n_SUBSEQ_CTRL(n)	Ly(0x4e30, n) /* Sub-sequence command parameter control */
+#define IOSAV_n_ADDRESS_LFSR(n)	Ly(0x4e40, n) /* 23-bit LFSR value of the sequence */
+
+#define PM_THML_STAT		0x4e80 /* Thermal status of each rank */
+#define IOSAV_SEQ_CTL		0x4e84 /* IOSAV sequence level control */
+#define IOSAV_DATA_CTL		0x4e88 /* Data control in IOSAV mode */
+#define IOSAV_STATUS		0x4e8c /* State of the IOSAV sequence machine */
+#define TC_ZQCAL		0x4e90 /* ZQCAL control register */
+#define TC_RFP			0x4e94 /* Refresh Parameters */
+#define TC_RFTP			0x4e98 /* Refresh Timing Parameters */
+#define TC_MR2_SHADOW		0x4e9c /* MR2 shadow - copy of DDR configuration */
+#define MC_INIT_STATE		0x4ea0 /* IOSAV mode control */
+#define TC_SRFTP		0x4ea4 /* Self-refresh timing parameters */
+
+/**
+ * Auxiliary register in mcmnts synthesis FUB (Functional Unit Block). Additionally, this
+ * register is also used to enable IOSAV_n_SP_CMD_ADDR optimization on Ivy Bridge.
+ */
+#define MCMNTS_SPARE		0x4ea8 /** WARNING: Reserved, use only on IVB! */
+
+#define IOSAV_ERROR		0x4eac /* Data vector count of the first error */
+#define IOSAV_DC_MASK		0x4eb0 /* IOSAV data check masking */
+
+#define IOSAV_By_ERROR_COUNT(y)	Ly(0x4f40, y) /* Per-byte 16-bit error counter */
+#define IOSAV_G_ERROR_COUNT	0x4f64 /* Global 16-bit error counter */
+
+/** WARNING: Only applies to Ivy Bridge! */
+#define IOSAV_BYTE_SERROR	0x4f68 /** Byte-Wise Sticky Error */
+#define IOSAV_BYTE_SERROR_C	0x4f6c /** Byte-Wise Sticky Error Clear */
+
+#define PM_TRML_M_CONFIG	0x4f80 /* Thermal mode configuration */
+#define PM_CMD_PWR		0x4f84 /* Power contribution of commands */
+#define PM_BW_LIMIT_CONFIG	0x4f88 /* Bandwidth throttling on overtemperature */
+#define SC_WDBWM		0x4f8c /* Watermarks and starvation counter config */
+
+/* No, there's no need to get mad about the Memory Address Decoder */
+#define MAD_CHNL		0x5000		/* Address Decoder Channel Configuration */
+#define MAD_DIMM(ch)		Ly(0x5004, ch)	/* Channel characteristics */
+#define MAD_DIMM_CH0		MAD_DIMM(0)	/* Channel 0 is at 0x5004 */
+#define MAD_DIMM_CH1		MAD_DIMM(1)	/* Channel 1 is at 0x5008 */
+#define MAD_DIMM_CH2		MAD_DIMM(2)	/* Channel 2 is at 0x500c (unused on SNB) */
+
+#define MAD_ZR			0x5014	/* Address Decode Zones */
+#define MCDECS_SPARE		0x5018 /* Spare register in mcdecs synthesis FUB */
+#define MCDECS_CBIT		0x501c /* Chicken bits in mcdecs synthesis FUB */
+
+#define CHANNEL_HASH		0x5024 /** WARNING: Only exists on IVB! */
+
+#define MC_INIT_STATE_G		0x5030 /* High-level behavior in IOSAV mode */
+#define MRC_REVISION		0x5034 /* MRC Revision */
+#define PM_DLL_CONFIG		0x5064 /* Memory Controller I/O DLL config */
+#define RCOMP_TIMER		0x5084 /* RCOMP evaluation timer register */
+
+#define MC_LOCK			0x50fc /* Memory Controller Lock register */
+
+#define GFXVTBAR		0x5400 /* Base address for IGD */
+#define VTVC0BAR		0x5410 /* Base address for PEG, USB, SATA, etc. */
+
+/* On Ivy Bridge, this is used to enable Power Aware Interrupt Routing */
+#define INTRDIRCTL		0x5418 /* Interrupt Redirection Control */
+
+/* PAVP message register. Bit 0 locks PAVP settings, and bits [31..20] are an offset. */
+#define PAVP_MSG		0x5500
+
+#define MEM_TRML_ESTIMATION_CONFIG	0x5880
+#define MEM_TRML_THRESHOLDS_CONFIG	0x5888
+#define MEM_TRML_INTERRUPT		0x58a8
+
+/* Some power MSRs are also represented in MCHBAR */
+#define MCH_PKG_POWER_LIMIT_LO	0x59a0 /* Turbo Power Limit 1 parameters */
+#define MCH_PKG_POWER_LIMIT_HI	0x59a4 /* Turbo Power Limit 2 parameters */
+
+#define SSKPD			0x5d10 /* 64-bit scratchpad register */
+#define SSKPD_HI		0x5d14
+#define BIOS_RESET_CPL		0x5da8 /* 8-bit */
+
+/* PCODE will sample SAPM-related registers at the end of Phase 4. */
+#define MC_BIOS_REQ		0x5e00 /* Memory frequency request register */
+#define MC_BIOS_DATA		0x5e04 /* Miscellaneous information for BIOS */
+#define SAPMCTL			0x5f00 /* Bit 3 enables DDR EPG (C7i) on IVB */
+#define M_COMP			0x5f08 /* Memory COMP control */
+#define SAPMTIMERS		0x5f10 /* SAPM timers in 10ns (100 MHz) units */
+
+/* WARNING: Only applies to Sandy Bridge! */
+#define BANDTIMERS_SNB		0x5f18 /* MPLL and PPLL time to do self-banding */
+
+/** WARNING: Only applies to Ivy Bridge! */
+#define SAPMTIMERS2_IVB		0x5f18 /** Extra latency for DDRIO EPG exit (C7i) */
+#define BANDTIMERS_IVB		0x5f20 /** MPLL and PPLL time to do self-banding */
+
+/* Finalize registers. The names come from Haswell, as the finalize sequence is the same. */
+#define HDAUDRID		0x6008
+#define UMAGFXCTL		0x6020
+#define VDMBDFBARKVM		0x6030
+#define VDMBDFBARPAVP		0x6034
+#define VTDTRKLCK		0x63fc
+#define REQLIM			0x6800
+#define DMIVCLIM		0x7000
+#define PEGCTL			0x7010 /* Bit 0 is PCIPWRGAT (clock gate all PEG controllers) */
+#define CRDTCTL3		0x740c /* Minimum completion credits for PCIe/DMI */
+#define CRDTCTL4		0x7410 /* Read Return Tracker credits */
+#define CRDTLCK			0x77fc
+
+#endif /* __SANDYBRIDGE_MCHBAR_REGS_H__ */
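
The indexed helpers at the top of the new header compose MCHBAR offsets from the channel and
index arguments; for example, IOSAV_n_SP_CMD_ADDR_ch(1, 2) is CxLy(0x4200, 1, 2), which is
0x4200 + (1 << 10) + (2 << 2) = 0x4608. A minimal polling sketch against the IOSAV_STATUS_ch
bitfields documented in the header (illustrative only, not part of this change):

	/* Wait until the IOSAV sequence machine reports DONE (bit 2) or ERROR (bit 3) */
	static int iosav_wait_done(int channel)
	{
		u32 status;

		do {
			status = MCHBAR32(IOSAV_STATUS_ch(channel));
		} while (!(status & ((1 << 2) | (1 << 3))));

		return (status & (1 << 3)) ? -1 : 0;
	}
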
diff --git a/src/northbridge/intel/sandybridge/memmap.c b/src/northbridge/intel/sandybridge/memmap.c
index 03e8db6..60b6dcd 100644
--- a/src/northbridge/intel/sandybridge/memmap.c
+++ b/src/northbridge/intel/sandybridge/memmap.c
@@ -26,18 +26,17 @@
 static uintptr_t smm_region_start(void)
 {
 	/* Base of TSEG is top of usable DRAM */
-	uintptr_t tom = pci_read_config32(PCI_DEV(0, 0, 0), TSEGMB);
-	return tom;
+	return pci_read_config32(HOST_BRIDGE, TSEGMB);
 }
 
 void *cbmem_top_chipset(void)
 {
-	return (void *) smm_region_start();
+	return (void *)smm_region_start();
 }
 
 static uintptr_t northbridge_get_tseg_base(void)
 {
-	return ALIGN_DOWN(smm_region_start(), 1*MiB);
+	return ALIGN_DOWN(smm_region_start(), 1 * MiB);
 }
 
 static size_t northbridge_get_tseg_size(void)
@@ -48,24 +47,27 @@
 void smm_region(uintptr_t *start, size_t *size)
 {
 	*start = northbridge_get_tseg_base();
-	*size = northbridge_get_tseg_size();
+	*size  = northbridge_get_tseg_size();
 }
 
 void fill_postcar_frame(struct postcar_frame *pcf)
 {
-	uintptr_t top_of_ram;
+	uintptr_t top_of_ram = (uintptr_t)cbmem_top();
 
-	top_of_ram = (uintptr_t)cbmem_top();
-	/* Cache 8MiB below the top of ram. On sandybridge systems the top of
+	/*
+	 * Cache 8MiB below the top of ram. On sandybridge systems the top of
 	 * RAM under 4GiB is the start of the TSEG region. It is required to
 	 * be 8MiB aligned. Set this area as cacheable so it can be used later
-	 * for ramstage before setting up the entire RAM as cacheable. */
-	postcar_frame_add_mtrr(pcf, top_of_ram - 8*MiB, 8*MiB, MTRR_TYPE_WRBACK);
+	 * for ramstage before setting up the entire RAM as cacheable.
+	 */
+	postcar_frame_add_mtrr(pcf, top_of_ram - 8 * MiB, 8 * MiB, MTRR_TYPE_WRBACK);
 
-	/* Cache 8MiB at the top of ram. Top of RAM on sandybridge systems
+	/*
+	 * Cache 8MiB at the top of ram. Top of RAM on sandybridge systems
 	 * is where the TSEG region resides. However, it is not restricted
 	 * to SMM mode until SMM has been relocated. By setting the region
 	 * to cacheable it provides faster access when relocating the SMM
-	 * handler as well as using the TSEG region for other purposes. */
-	postcar_frame_add_mtrr(pcf, top_of_ram, 8*MiB, MTRR_TYPE_WRBACK);
+	 * handler as well as using the TSEG region for other purposes.
+	 */
+	postcar_frame_add_mtrr(pcf, top_of_ram, 8 * MiB, MTRR_TYPE_WRBACK);
 }
diff --git a/src/northbridge/intel/sandybridge/northbridge.c b/src/northbridge/intel/sandybridge/northbridge.c
index eb102db..23c1489 100644
--- a/src/northbridge/intel/sandybridge/northbridge.c
+++ b/src/northbridge/intel/sandybridge/northbridge.c
@@ -35,11 +35,9 @@
 int bridge_silicon_revision(void)
 {
 	if (bridge_revision_id < 0) {
-		uint8_t stepping = cpuid_eax(1) & 0xf;
-		uint8_t bridge_id = pci_read_config16(
-			pcidev_on_root(0, 0),
-			PCI_DEVICE_ID) & 0xf0;
-		bridge_revision_id = bridge_id | stepping;
+		uint8_t stepping = cpuid_eax(1) & 0x0f;
+		uint8_t bridge_id = pci_read_config16(pcidev_on_root(0, 0), PCI_DEVICE_ID);
+		bridge_revision_id = (bridge_id & 0xf0) | stepping;
 	}
 	return bridge_revision_id;
 }
@@ -66,18 +64,19 @@
 
 	pciexbar_reg = pci_read_config32(dev, PCIEXBAR);
 
+	/* MMCFG not supported or not enabled */
 	if (!(pciexbar_reg & (1 << 0)))
 		return 0;
 
 	switch ((pciexbar_reg >> 1) & 3) {
-	case 0: // 256MB
-		*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28));
+	case 0: /* 256MB */
+		*base = pciexbar_reg & (0xffffffffULL << 28);
 		return 256;
-	case 1: // 128M
-		*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
+	case 1: /* 128M */
+		*base = pciexbar_reg & (0xffffffffULL << 27);
 		return 128;
-	case 2: // 64M
-		*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28)|(1 << 27)|(1 << 26));
+	case 2: /* 64M */
+		*base = pciexbar_reg & (0xffffffffULL << 26);
 		return 64;
 	}
 
@@ -88,15 +87,14 @@
 {
 	mmio_resource(dev, index++, uma_memory_base >> 10, uma_memory_size >> 10);
 
-	mmio_resource(dev, index++, legacy_hole_base_k,
-			(0xc0000 >> 10) - legacy_hole_base_k);
-	reserved_ram_resource(dev, index++, 0xc0000 >> 10,
-			(0x100000 - 0xc0000) >> 10);
+	mmio_resource(dev, index++, legacy_hole_base_k, (0xc0000 >> 10) - legacy_hole_base_k);
+
+	reserved_ram_resource(dev, index++, 0xc0000 >> 10, (0x100000 - 0xc0000) >> 10);
 
 #if CONFIG(CHROMEOS_RAMOOPS)
 	reserved_ram_resource(dev, index++,
 			CONFIG_CHROMEOS_RAMOOPS_RAM_START >> 10,
-			CONFIG_CHROMEOS_RAMOOPS_RAM_SIZE >> 10);
+			CONFIG_CHROMEOS_RAMOOPS_RAM_SIZE  >> 10);
 #endif
 
 	if ((bridge_silicon_revision() & BASE_REV_MASK) == BASE_REV_SNB) {
@@ -106,10 +104,10 @@
 	}
 
 	/* Reserve IOMMU BARs */
-	const u32 capid0_a = pci_read_config32(dev, 0xe4);
+	const u32 capid0_a = pci_read_config32(dev, CAPID0_A);
 	if (!(capid0_a & (1 << 23))) {
-		mmio_resource(dev, index++, IOMMU_BASE1 >> 10, 4);
-		mmio_resource(dev, index++, IOMMU_BASE2 >> 10, 4);
+		mmio_resource(dev, index++, GFXVT_BASE >> 10, 4);
+		mmio_resource(dev, index++, VTVC0_BASE >> 10, 4);
 	}
 }
 
@@ -149,7 +147,7 @@
 	struct device *mch = pcidev_on_root(0, 0);
 
 	/* Top of Upper Usable DRAM, including remap */
-	touud = pci_read_config32(mch, TOUUD+4);
+	touud  = pci_read_config32(mch, TOUUD + 4);
 	touud <<= 32;
 	touud |= pci_read_config32(mch, TOUUD);
 
@@ -157,17 +155,17 @@
 	tolud = pci_read_config32(mch, TOLUD);
 
 	/* Top of Memory - does not account for any UMA */
-	tom = pci_read_config32(mch, 0xa4);
+	tom  = pci_read_config32(mch, TOM + 4);
 	tom <<= 32;
-	tom |= pci_read_config32(mch, 0xa0);
+	tom |= pci_read_config32(mch, TOM);
 
 	printk(BIOS_DEBUG, "TOUUD 0x%llx TOLUD 0x%08x TOM 0x%llx\n",
 	       touud, tolud, tom);
 
-	/* ME UMA needs excluding if total memory <4GB */
-	me_base = pci_read_config32(mch, 0x74);
+	/* ME UMA needs excluding if total memory < 4GB */
+	me_base  = pci_read_config32(mch, MESEG_BASE + 4);
 	me_base <<= 32;
-	me_base |= pci_read_config32(mch, 0x70);
+	me_base |= pci_read_config32(mch, MESEG_BASE);
 
 	printk(BIOS_DEBUG, "MEBASE 0x%llx\n", me_base);
 
@@ -206,30 +204,28 @@
 	}
 
 	/* Calculate TSEG size from its base which must be below GTT */
-	tseg_base = pci_read_config32(mch, 0xb8);
+	tseg_base = pci_read_config32(mch, TSEGMB);
 	uma_size = (uma_memory_base - tseg_base) >> 10;
 	tomk -= uma_size;
 	uma_memory_base = tomk * 1024ULL;
 	uma_memory_size += uma_size * 1024ULL;
-	printk(BIOS_DEBUG, "TSEG base 0x%08x size %uM\n",
-	       tseg_base, uma_size >> 10);
+	printk(BIOS_DEBUG, "TSEG base 0x%08x size %uM\n", tseg_base, uma_size >> 10);
 
 	printk(BIOS_INFO, "Available memory below 4GB: %lluM\n", tomk >> 10);
 
 	/* Report the memory regions */
 	ram_resource(dev, 3, 0, legacy_hole_base_k);
 	ram_resource(dev, 4, legacy_hole_base_k + legacy_hole_size_k,
-	     (tomk - (legacy_hole_base_k + legacy_hole_size_k)));
+		    (tomk - (legacy_hole_base_k + legacy_hole_size_k)));
 
 	/*
-	 * If >= 4GB installed then memory from TOLUD to 4GB
-	 * is remapped above TOM, TOUUD will account for both
+	 * If >= 4GB installed, then memory from TOLUD to 4GB is remapped above TOM.
+	 * TOUUD will account for both memory chunks.
 	 */
 	touud >>= 10; /* Convert to KB */
 	if (touud > 4096 * 1024) {
 		ram_resource(dev, 5, 4096 * 1024, touud - (4096 * 1024));
-		printk(BIOS_INFO, "Available memory above 4GB: %lluM\n",
-		       (touud >> 10) - 4096);
+		printk(BIOS_INFO, "Available memory above 4GB: %lluM\n", (touud >> 10) - 4096);
 	}
 
 	add_fixed_resources(dev, 6);
@@ -253,17 +249,18 @@
 	return NULL;
 }
 
-	/* TODO We could determine how many PCIe busses we need in
-	 * the bar. For now that number is hardcoded to a max of 64.
-	 */
+/*
+ * TODO We could determine how many PCIe buses we need in the BAR.
+ * For now, that number is hardcoded to a max of 64.
+ */
 static struct device_operations pci_domain_ops = {
-	.read_resources   = pci_domain_read_resources,
-	.set_resources    = pci_domain_set_resources,
-	.enable_resources = NULL,
-	.init             = NULL,
-	.scan_bus         = pci_domain_scan_bus,
+	.read_resources    = pci_domain_read_resources,
+	.set_resources     = pci_domain_set_resources,
+	.enable_resources  = NULL,
+	.init              = NULL,
+	.scan_bus          = pci_domain_scan_bus,
 	.write_acpi_tables = northbridge_write_acpi_tables,
-	.acpi_name        = northbridge_acpi_name,
+	.acpi_name         = northbridge_acpi_name,
 };
 
 static void mc_read_resources(struct device *dev)
@@ -291,7 +288,7 @@
 	/* Steps prior to DMI ASPM */
 	if ((bridge_silicon_revision() & BASE_REV_MASK) == BASE_REV_SNB) {
 		reg32 = DMIBAR32(0x250);
-		reg32 &= ~((1 << 22)|(1 << 20));
+		reg32 &= ~((1 << 22) | (1 << 20));
 		reg32 |= (1 << 21);
 		DMIBAR32(0x250) = reg32;
 	}
@@ -304,6 +301,7 @@
 		reg32 = DMIBAR32(0x1f8);
 		reg32 |= (1 << 16);
 		DMIBAR32(0x1f8) = reg32;
+
 	} else if (bridge_silicon_revision() >= SNB_STEP_D1) {
 		reg32 = DMIBAR32(0x1f8);
 		reg32 &= ~(1 << 26);
@@ -374,10 +372,15 @@
 
 	dev = pcidev_on_root(0, 0);
 	pci_write_config32(dev, DEVEN, reg);
+
 	if (!(reg & (DEVEN_PEG60 | DEVEN_PEG10 | DEVEN_PEG11 | DEVEN_PEG12))) {
-		/* Set the PEG clock gating bit.
-		 * Disables the IO clock on all PEG devices. */
-		MCHBAR32(0x7010) = MCHBAR32(0x7010) | 0x01;
+		/*
+		 * Set the PEG clock gating bit. Disables the IO clock on all PEG devices.
+		 *
+		 * FIXME: If not clock gating, this register still needs to be written to once,
+		 *        to lock it down. Also, never clock gate on Ivy Bridge stepping A0!
+		 */
+		MCHBAR32_OR(PEGCTL, 1);
 		printk(BIOS_DEBUG, "Disabling PEG IO clock.\n");
 	}
 }
@@ -394,10 +397,10 @@
 
 	if ((bridge_silicon_revision() & BASE_REV_MASK) == BASE_REV_IVB) {
 		/* Enable Power Aware Interrupt Routing */
-		u8 pair = MCHBAR8(PAIR_CTL);
-		pair &= ~0xf;	/* Clear 3:0 */
-		pair |= 0x4;	/* Fixed Priority */
-		MCHBAR8(PAIR_CTL) = pair;
+		u8 pair = MCHBAR8(INTRDIRCTL);
+		pair &= ~0x0f;	/* Clear 3:0 */
+		pair |=  0x04;	/* Fixed Priority */
+		MCHBAR8(INTRDIRCTL) = pair;
 
 		/* 30h for IvyBridge */
 		bridge_type |= 0x30;
@@ -407,9 +410,7 @@
 	}
 	MCHBAR32(SAPMTIMERS) = bridge_type;
 
-	/* Turn off unused devices. Has to be done before
-	 * setting BIOS_RESET_CPL.
-	 */
+	/* Turn off unused devices. Has to be done before setting BIOS_RESET_CPL. */
 	disable_peg();
 
 	/*
@@ -426,17 +427,17 @@
 	set_power_limits(28);
 
 	/*
-	 * CPUs with configurable TDP also need power limits set
-	 * in MCHBAR.  Use same values from MSR_PKG_POWER_LIMIT.
+	 * CPUs with configurable TDP also need power limits set in MCHBAR.
+	 * Use the same values from MSR_PKG_POWER_LIMIT.
 	 */
 	if (cpu_config_tdp_levels()) {
 		msr_t msr = rdmsr(MSR_PKG_POWER_LIMIT);
-		MCHBAR32(MC_TURBO_PL1) = msr.lo;
-		MCHBAR32(MC_TURBO_PL2) = msr.hi;
+		MCHBAR32(MCH_PKG_POWER_LIMIT_LO) = msr.lo;
+		MCHBAR32(MCH_PKG_POWER_LIMIT_HI) = msr.hi;
 	}
 
 	/* Set here before graphics PM init */
-	MCHBAR32(MMIO_PAVP_CTL) = 0x00100001;
+	MCHBAR32(PAVP_MSG) = 0x00100001;
 }
 
 void northbridge_write_smram(u8 smram)
@@ -445,16 +446,16 @@
 }
 
 static struct pci_operations intel_pci_ops = {
-	.set_subsystem    = pci_dev_set_subsystem,
+	.set_subsystem = pci_dev_set_subsystem,
 };
 
 static struct device_operations mc_ops = {
-	.read_resources   = mc_read_resources,
-	.set_resources    = pci_dev_set_resources,
-	.enable_resources = pci_dev_enable_resources,
-	.init             = northbridge_init,
-	.scan_bus         = 0,
-	.ops_pci          = &intel_pci_ops,
+	.read_resources           = mc_read_resources,
+	.set_resources            = pci_dev_set_resources,
+	.enable_resources         = pci_dev_enable_resources,
+	.init                     = northbridge_init,
+	.scan_bus                 = NULL,
+	.ops_pci                  = &intel_pci_ops,
 	.acpi_fill_ssdt_generator = generate_cpu_entries,
 };
 
@@ -465,8 +466,8 @@
 };
 
 static const struct pci_driver mc_driver __pci_driver = {
-	.ops    = &mc_ops,
-	.vendor = PCI_VENDOR_ID_INTEL,
+	.ops     = &mc_ops,
+	.vendor  = PCI_VENDOR_ID_INTEL,
 	.devices = pci_device_ids,
 };
 
diff --git a/src/northbridge/intel/sandybridge/pcie.c b/src/northbridge/intel/sandybridge/pcie.c
index 258ade2..05f05ec 100644
--- a/src/northbridge/intel/sandybridge/pcie.c
+++ b/src/northbridge/intel/sandybridge/pcie.c
@@ -54,9 +54,9 @@
 	if (dev->path.pci.devfn == PCI_DEVFN(0, 0) &&
 	    port->bus->secondary == 0 &&
 	    (port->path.pci.devfn == PCI_DEVFN(1, 0) ||
-	    port->path.pci.devfn == PCI_DEVFN(1, 1) ||
-	    port->path.pci.devfn == PCI_DEVFN(1, 2) ||
-	    port->path.pci.devfn == PCI_DEVFN(6, 0)))
+	     port->path.pci.devfn == PCI_DEVFN(1, 1) ||
+	     port->path.pci.devfn == PCI_DEVFN(1, 2) ||
+	     port->path.pci.devfn == PCI_DEVFN(6, 0)))
 		return "DEV0";
 
 	return NULL;
@@ -81,9 +81,11 @@
 #endif
 };
 
-static const unsigned short pci_device_ids[] = { 0x0101, 0x0105, 0x0109, 0x010d,
-						 0x0151, 0x0155, 0x0159, 0x015d,
-						 0 };
+static const unsigned short pci_device_ids[] = {
+	0x0101, 0x0105, 0x0109, 0x010d,
+	0x0151, 0x0155, 0x0159, 0x015d,
+	0,
+};
 
 static const struct pci_driver pch_pcie __pci_driver = {
 	.ops		= &device_ops,
diff --git a/src/northbridge/intel/sandybridge/pei_data.h b/src/northbridge/intel/sandybridge/pei_data.h
index 8e98bec..8114bcc 100644
--- a/src/northbridge/intel/sandybridge/pei_data.h
+++ b/src/northbridge/intel/sandybridge/pei_data.h
@@ -33,10 +33,10 @@
 #include <stdint.h>
 
 typedef struct {
-	uint16_t mode;                // 0: Disable, 1: Enable, 2: Auto, 3: Smart Auto
-	uint16_t hs_port_switch_mask; // 4 bit mask, 1: switchable, 0: not switchable
-	uint16_t preboot_support;     // 0: No xHCI preOS driver, 1: xHCI preOS driver
-	uint16_t xhci_streams;        // 0: Disable, 1: Enable
+	uint16_t mode;                /* 0: Disable, 1: Enable, 2: Auto, 3: Smart Auto */
+	uint16_t hs_port_switch_mask; /* 4 bit mask, 1: switchable, 0: not switchable  */
+	uint16_t preboot_support;     /* 0: No xHCI preOS driver, 1: xHCI preOS driver */
+	uint16_t xhci_streams;        /* 0: Disable, 1: Enable */
 } pch_usb3_controller_settings;
 
 typedef void (*tx_byte_func)(unsigned char byte);
@@ -57,17 +57,19 @@
 	uint32_t pmbase;
 	uint32_t gpiobase;
 	uint32_t thermalbase;
-	uint32_t system_type; // 0 Mobile, 1 Desktop/Server
+	uint32_t system_type; /* 0 Mobile, 1 Desktop/Server */
 	uint32_t tseg_size;
 	uint8_t spd_addresses[4];
 	uint8_t ts_addresses[4];
 	int boot_mode;
 	int ec_present;
 	int gbe_enable;
-	// 0 = leave channel enabled
-	// 1 = disable dimm 0 on channel
-	// 2 = disable dimm 1 on channel
-	// 3 = disable dimm 0+1 on channel
+	/*
+	 * 0 = leave channel enabled
+	 * 1 = disable dimm 0 on channel
+	 * 2 = disable dimm 1 on channel
+	 * 3 = disable dimm 0+1 on channel
+	 */
 	int dimm_channel0_disabled;
 	int dimm_channel1_disabled;
 	/* Seed values saved in CMOS */
@@ -90,46 +92,50 @@
 	 *  [1] = overcurrent pin
 	 *  [2] = length
 	 *
-	 * Ports 0-7 can be mapped to OC0-OC3
+	 * Ports 0-7  can be mapped to OC0-OC3
 	 * Ports 8-13 can be mapped to OC4-OC7
 	 *
 	 * Port Length
 	 *  MOBILE:
-	 *   < 0x050 = Setting 1 (back panel, 1-5in, lowest tx amplitude)
-	 *   < 0x140 = Setting 2 (back panel, 5-14in, highest tx amplitude)
+	 *   < 0x050 = Setting 1 (back panel,           1 to  5 in, lowest  tx amplitude)
+	 *   < 0x140 = Setting 2 (back panel,           5 to 14 in, highest tx amplitude)
 	 *  DESKTOP:
-	 *   < 0x080 = Setting 1 (front/back panel, <8in, lowest tx amplitude)
-	 *   < 0x130 = Setting 2 (back panel, 8-13in, higher tx amplitude)
-	 *   < 0x150 = Setting 3 (back panel, 13-15in, highest tx amplitude)
+	 *   < 0x080 = Setting 1 (front/back panel, less than 8 in, lowest  tx amplitude)
+	 *   < 0x130 = Setting 2 (back panel,           8 to 13 in, higher  tx amplitude)
+	 *   < 0x150 = Setting 3 (back panel,          13 to 15 in, highest tx amplitude)
 	 */
 	uint16_t usb_port_config[16][3];
 	/* See the usb3 struct above for details */
 	pch_usb3_controller_settings usb3;
-	/* SPD data array for onboard RAM.
-	 * spd_data [1..3] are ignored, instead the "dimm_channel{0,1}_disabled"
-	 * flag and the spd_addresses are used to determine which DIMMs should
-	 * use the SPD from spd_data[0].
+	/*
+	 * SPD data array for onboard RAM. Note that spd_data[1..3] are ignored: instead,
+	 * the "dimm_channel{0,1}_disabled" flag and the spd_addresses are used to determine
+	 * which DIMMs should use the SPD from spd_data[0].
 	 */
 	uint8_t spd_data[4][256];
 	tx_byte_func tx_byte;
 	int ddr3lv_support;
-	/* pcie_init needs to be set to 1 to have the system agent initialize
-	 * PCIe. Note: This should only be required if your system has Gen3 devices
-	 * and it will increase your boot time by at least 100ms.
+	/*
+	 * pcie_init needs to be set to 1 to have the system agent initialize PCIe.
+	 * Note: This should only be required if your system has Gen3 devices and
+	 * it will increase your boot time by at least 100ms.
 	 */
 	int pcie_init;
-	/* N mode functionality. Leave this setting at 0.
-	 * 0 Auto
-	 * 1 1N
-	 * 2 2N
+	/*
+	 * N mode functionality. Leave this setting at 0.
+	 *
+	 * 0: Auto
+	 * 1: 1N
+	 * 2: 2N
 	 */
 	int nmode;
-	/* DDR refresh rate config. JEDEC Standard No.21-C Annex K allows
-	 * for DIMM SPD data to specify whether double-rate is required for
-	 * extended operating temperature range.
-	 * 0 Enable double rate based upon temperature thresholds
-	 * 1 Normal rate
-	 * 2 Always enable double rate
+	/*
+	 * DDR refresh rate config. JEDEC Standard No.21-C Annex K allows for DIMM SPD data to
+	 * specify whether double-rate is required for extended operating temperature range.
+	 *
+	 * 0: Enable double rate based upon temperature thresholds
+	 * 1: Normal rate
+	 * 2: Always enable double rate
 	 */
 	int ddr_refresh_rate_config;
 } __packed;
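
A minimal sketch of how a board might fill a few of these fields. The function name and every
value below are hypothetical; only the field names come from the struct above.

/* Hypothetical example values, not taken from any real board */
static void example_fill_pei_data(struct pei_data *pei)
{
	/* USB2 port 0: enabled, over-current pin OC0, trace length 0x0040.
	   On a mobile board, 0x0040 < 0x050 selects Setting 1 (lowest tx amplitude). */
	pei->usb_port_config[0][0] = 1;
	pei->usb_port_config[0][1] = 0;
	pei->usb_port_config[0][2] = 0x0040;

	pei->nmode = 0;				/* Auto command rate, as recommended above */
	pei->ddr_refresh_rate_config = 0;	/* Double rate based on temperature */
	pei->dimm_channel1_disabled = 2;	/* Ignore DIMM 1 on channel 1 */
}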
diff --git a/src/northbridge/intel/sandybridge/raminit.c b/src/northbridge/intel/sandybridge/raminit.c
index 60217b4..ca78eb3 100644
--- a/src/northbridge/intel/sandybridge/raminit.c
+++ b/src/northbridge/intel/sandybridge/raminit.c
@@ -35,47 +35,48 @@
 
 #define MRC_CACHE_VERSION 1
 
-/* FIXME: no ECC support.  */
-/* FIXME: no support for 3-channel chipsets.  */
+/* FIXME: no ECC support */
+/* FIXME: no support for 3-channel chipsets */
 
 static const char *ecc_decoder[] = {
 	"inactive",
 	"active on IO",
 	"disabled on IO",
-	"active"
+	"active",
 };
 
 static void wait_txt_clear(void)
 {
-	struct cpuid_result cp;
+	struct cpuid_result cp = cpuid_ext(1, 0);
 
-	cp = cpuid_ext(0x1, 0x0);
-	/* Check if TXT is supported?  */
-	if (!(cp.ecx & 0x40))
+	/* Check if TXT is supported */
+	if (!(cp.ecx & (1 << 6)))
 		return;
-	/* Some TXT public bit.  */
+
+	/* Some TXT public bit */
 	if (!(read32((void *)0xfed30010) & 1))
 		return;
-	/* Wait for TXT clear.  */
-	while (!(read8((void *)0xfed40000) & (1 << 7)));
+
+	/* Wait for TXT clear */
+	while (!(read8((void *)0xfed40000) & (1 << 7)))
+		;
 }
 
-/*
- * Disable a channel in ramctr_timing.
- */
-static void disable_channel(ramctr_timing *ctrl, int channel) {
+/* Disable a channel in ramctr_timing */
+static void disable_channel(ramctr_timing *ctrl, int channel)
+{
 	ctrl->rankmap[channel] = 0;
+
 	memset(&ctrl->rank_mirror[channel][0], 0, sizeof(ctrl->rank_mirror[0]));
+
 	ctrl->channel_size_mb[channel] = 0;
-	ctrl->cmd_stretch[channel] = 0;
-	ctrl->mad_dimm[channel] = 0;
-	memset(&ctrl->timings[channel][0], 0, sizeof(ctrl->timings[0]));
+	ctrl->cmd_stretch[channel]     = 0;
+	ctrl->mad_dimm[channel]        = 0;
+	memset(&ctrl->timings[channel][0],   0, sizeof(ctrl->timings[0]));
 	memset(&ctrl->info.dimm[channel][0], 0, sizeof(ctrl->info.dimm[0]));
 }
 
-/*
- * Fill cbmem with information for SMBIOS type 17.
- */
+/* Fill cbmem with information for SMBIOS type 17 */
 static void fill_smbios17(ramctr_timing *ctrl)
 {
 	int channel, slot;
@@ -89,54 +90,50 @@
 	}
 }
 
-/*
- * Dump in the log memory controller configuration as read from the memory
- * controller registers.
- */
+#define ON_OFF(val) (((val) & 1) ? "on" : "off")
+
+/* Print the memory controller configuration as read from the memory controller registers. */
 static void report_memory_config(void)
 {
 	u32 addr_decoder_common, addr_decode_ch[NUM_CHANNELS];
-	int i, refclk;
+	int i;
 
 	addr_decoder_common = MCHBAR32(MAD_CHNL);
-	addr_decode_ch[0] = MCHBAR32(MAD_DIMM_CH0);
-	addr_decode_ch[1] = MCHBAR32(MAD_DIMM_CH1);
+	addr_decode_ch[0]   = MCHBAR32(MAD_DIMM_CH0);
+	addr_decode_ch[1]   = MCHBAR32(MAD_DIMM_CH1);
 
-	refclk = MCHBAR32(MC_BIOS_REQ) & 0x100 ? 100 : 133;
+	const int refclk = MCHBAR32(MC_BIOS_REQ) & 0x100 ? 100 : 133;
 
 	printk(BIOS_DEBUG, "memcfg DDR3 ref clock %d MHz\n", refclk);
 	printk(BIOS_DEBUG, "memcfg DDR3 clock %d MHz\n",
 	       (MCHBAR32(MC_BIOS_DATA) * refclk * 100 * 2 + 50) / 100);
+
 	printk(BIOS_DEBUG, "memcfg channel assignment: A: %d, B % d, C % d\n",
-	       addr_decoder_common & 3, (addr_decoder_common >> 2) & 3,
+	       (addr_decoder_common >> 0) & 3,
+	       (addr_decoder_common >> 2) & 3,
 	       (addr_decoder_common >> 4) & 3);
 
 	for (i = 0; i < ARRAY_SIZE(addr_decode_ch); i++) {
 		u32 ch_conf = addr_decode_ch[i];
-		printk(BIOS_DEBUG, "memcfg channel[%d] config (%8.8x):\n", i,
-		       ch_conf);
-		printk(BIOS_DEBUG, "   ECC %s\n",
-		       ecc_decoder[(ch_conf >> 24) & 3]);
-		printk(BIOS_DEBUG, "   enhanced interleave mode %s\n",
-		       ((ch_conf >> 22) & 1) ? "on" : "off");
-		printk(BIOS_DEBUG, "   rank interleave %s\n",
-		       ((ch_conf >> 21) & 1) ? "on" : "off");
+		printk(BIOS_DEBUG, "memcfg channel[%d] config (%8.8x):\n", i, ch_conf);
+		printk(BIOS_DEBUG, "   ECC %s\n", ecc_decoder[(ch_conf >> 24) & 3]);
+		printk(BIOS_DEBUG, "   enhanced interleave mode %s\n", ON_OFF(ch_conf >> 22));
+		printk(BIOS_DEBUG, "   rank interleave %s\n", ON_OFF(ch_conf >> 21));
 		printk(BIOS_DEBUG, "   DIMMA %d MB width x%d %s rank%s\n",
-		       ((ch_conf >> 0) & 0xff) * 256,
+		       ((ch_conf >>  0) & 0xff) * 256,
 		       ((ch_conf >> 19) & 1) ? 16 : 8,
 		       ((ch_conf >> 17) & 1) ? "dual" : "single",
 		       ((ch_conf >> 16) & 1) ? "" : ", selected");
 		printk(BIOS_DEBUG, "   DIMMB %d MB width x%d %s rank%s\n",
-		       ((ch_conf >> 8) & 0xff) * 256,
+		       ((ch_conf >>  8) & 0xff) * 256,
 		       ((ch_conf >> 20) & 1) ? 16 : 8,
 		       ((ch_conf >> 18) & 1) ? "dual" : "single",
 		       ((ch_conf >> 16) & 1) ? ", selected" : "");
 	}
 }
+#undef ON_OFF
 
-/*
- * Return CRC16 match for all SPDs.
- */
+/* Return CRC16 match for all SPDs */
 static int verify_crc16_spds_ddr3(spd_raw_data *spd, ramctr_timing *ctrl)
 {
 	int channel, slot, spd_slot;
@@ -146,7 +143,7 @@
 		for (slot = 0; slot < NUM_SLOTS; slot++) {
 			spd_slot = 2 * channel + slot;
 			match &= ctrl->spd_crc[channel][slot] ==
-					spd_ddr3_calc_unique_crc(spd[spd_slot], sizeof(spd_raw_data));
+				spd_ddr3_calc_unique_crc(spd[spd_slot], sizeof(spd_raw_data));
 		}
 	}
 	return match;
@@ -166,7 +163,7 @@
 
 static void dram_find_spds_ddr3(spd_raw_data *spd, ramctr_timing *ctrl)
 {
-	int dimms = 0, dimms_on_channel;
+	int dimms = 0, ch_dimms;
 	int channel, slot, spd_slot;
 	dimm_info *dimm = &ctrl->info;
 
@@ -178,53 +175,55 @@
 	FOR_ALL_CHANNELS {
 		ctrl->channel_size_mb[channel] = 0;
 
-		dimms_on_channel = 0;
-		/* count dimms on channel */
+		ch_dimms = 0;
+		/* Count dimms on channel */
 		for (slot = 0; slot < NUM_SLOTS; slot++) {
 			spd_slot = 2 * channel + slot;
-			printk(BIOS_DEBUG,
-			       "SPD probe channel%d, slot%d\n", channel, slot);
+			printk(BIOS_DEBUG, "SPD probe channel%d, slot%d\n", channel, slot);
 
 			spd_decode_ddr3(&dimm->dimm[channel][slot], spd[spd_slot]);
 			if (dimm->dimm[channel][slot].dram_type == SPD_MEMORY_TYPE_SDRAM_DDR3)
-				dimms_on_channel++;
+				ch_dimms++;
 		}
 
 		for (slot = 0; slot < NUM_SLOTS; slot++) {
 			spd_slot = 2 * channel + slot;
-			printk(BIOS_DEBUG,
-			       "SPD probe channel%d, slot%d\n", channel, slot);
+			printk(BIOS_DEBUG, "SPD probe channel%d, slot%d\n", channel, slot);
 
-			/* search for XMP profile */
-			spd_xmp_decode_ddr3(&dimm->dimm[channel][slot],
-					spd[spd_slot],
+			/* Search for XMP profile */
+			spd_xmp_decode_ddr3(&dimm->dimm[channel][slot], spd[spd_slot],
 					DDR3_XMP_PROFILE_1);
 
 			if (dimm->dimm[channel][slot].dram_type != SPD_MEMORY_TYPE_SDRAM_DDR3) {
 				printram("No valid XMP profile found.\n");
 				spd_decode_ddr3(&dimm->dimm[channel][slot], spd[spd_slot]);
-			} else if (dimms_on_channel > dimm->dimm[channel][slot].dimms_per_channel) {
-				printram("XMP profile supports %u DIMMs, but %u DIMMs are installed.\n",
-						 dimm->dimm[channel][slot].dimms_per_channel,
-						 dimms_on_channel);
+
+			} else if (ch_dimms > dimm->dimm[channel][slot].dimms_per_channel) {
+				printram(
+				"XMP profile supports %u DIMMs, but %u DIMMs are installed.\n",
+					dimm->dimm[channel][slot].dimms_per_channel, ch_dimms);
+
 				if (CONFIG(NATIVE_RAMINIT_IGNORE_XMP_MAX_DIMMS))
-					printk(BIOS_WARNING, "XMP maximum DIMMs will be ignored.\n");
+					printk(BIOS_WARNING,
+						"XMP maximum DIMMs will be ignored.\n");
 				else
-					spd_decode_ddr3(&dimm->dimm[channel][slot], spd[spd_slot]);
+					spd_decode_ddr3(&dimm->dimm[channel][slot],
+							spd[spd_slot]);
+
 			} else if (dimm->dimm[channel][slot].voltage != 1500) {
-				/* TODO: support other DDR3 voltage than 1500mV */
+				/* TODO: Support DDR3 voltages other than 1500mV */
 				printram("XMP profile's requested %u mV is unsupported.\n",
 						 dimm->dimm[channel][slot].voltage);
 				spd_decode_ddr3(&dimm->dimm[channel][slot], spd[spd_slot]);
 			}
 
-			/* fill in CRC16 for MRC cache */
+			/* Fill in CRC16 for MRC cache */
 			ctrl->spd_crc[channel][slot] =
-					spd_ddr3_calc_unique_crc(spd[spd_slot], sizeof(spd_raw_data));
+				spd_ddr3_calc_unique_crc(spd[spd_slot], sizeof(spd_raw_data));
 
 			if (dimm->dimm[channel][slot].dram_type != SPD_MEMORY_TYPE_SDRAM_DDR3) {
-				// set dimm invalid
-				dimm->dimm[channel][slot].ranks = 0;
+				/* Mark DIMM as invalid */
+				dimm->dimm[channel][slot].ranks   = 0;
 				dimm->dimm[channel][slot].size_mb = 0;
 				continue;
 			}
@@ -232,30 +231,40 @@
 			dram_print_spd_ddr3(&dimm->dimm[channel][slot]);
 			dimms++;
 			ctrl->rank_mirror[channel][slot * 2] = 0;
-			ctrl->rank_mirror[channel][slot * 2 + 1] = dimm->dimm[channel][slot].flags.pins_mirrored;
+			ctrl->rank_mirror[channel][slot * 2 + 1] =
+				dimm->dimm[channel][slot].flags.pins_mirrored;
+
 			ctrl->channel_size_mb[channel] += dimm->dimm[channel][slot].size_mb;
 
 			ctrl->auto_self_refresh &= dimm->dimm[channel][slot].flags.asr;
-			ctrl->extended_temperature_range &= dimm->dimm[channel][slot].flags.ext_temp_refresh;
 
-			ctrl->rankmap[channel] |= ((1 << dimm->dimm[channel][slot].ranks) - 1) << (2 * slot);
-			printk(BIOS_DEBUG, "channel[%d] rankmap = 0x%x\n",
-			       channel, ctrl->rankmap[channel]);
+			ctrl->extended_temperature_range &=
+				dimm->dimm[channel][slot].flags.ext_temp_refresh;
+
+			ctrl->rankmap[channel] |=
+				((1 << dimm->dimm[channel][slot].ranks) - 1) << (2 * slot);
+
+			printk(BIOS_DEBUG, "channel[%d] rankmap = 0x%x\n", channel,
+				ctrl->rankmap[channel]);
 		}
-		if ((ctrl->rankmap[channel] & 3) && (ctrl->rankmap[channel] & 0xc)
-			&& dimm->dimm[channel][0].reference_card <= 5 && dimm->dimm[channel][1].reference_card <= 5) {
+		if ((ctrl->rankmap[channel] & 0x03) && (ctrl->rankmap[channel] & 0x0c)
+				&& dimm->dimm[channel][0].reference_card <= 5
+				&& dimm->dimm[channel][1].reference_card <= 5) {
+
 			const int ref_card_offset_table[6][6] = {
-				{ 0, 0, 0, 0, 2, 2, },
-				{ 0, 0, 0, 0, 2, 2, },
-				{ 0, 0, 0, 0, 2, 2, },
-				{ 0, 0, 0, 0, 1, 1, },
-				{ 2, 2, 2, 1, 0, 0, },
-				{ 2, 2, 2, 1, 0, 0, },
+				{ 0, 0, 0, 0, 2, 2 },
+				{ 0, 0, 0, 0, 2, 2 },
+				{ 0, 0, 0, 0, 2, 2 },
+				{ 0, 0, 0, 0, 1, 1 },
+				{ 2, 2, 2, 1, 0, 0 },
+				{ 2, 2, 2, 1, 0, 0 },
 			};
-			ctrl->ref_card_offset[channel] = ref_card_offset_table[dimm->dimm[channel][0].reference_card]
-				[dimm->dimm[channel][1].reference_card];
-		} else
+			ctrl->ref_card_offset[channel] = ref_card_offset_table
+					[dimm->dimm[channel][0].reference_card]
+					[dimm->dimm[channel][1].reference_card];
+		} else {
 			ctrl->ref_card_offset[channel] = 0;
+		}
 	}
 
 	if (!dimms)
@@ -265,29 +274,24 @@
 static void save_timings(ramctr_timing *ctrl)
 {
 	/* Save the MRC S3 restore data to cbmem */
-	mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION, ctrl,
-			sizeof(*ctrl));
+	mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION, ctrl, sizeof(*ctrl));
 }
 
-static int try_init_dram_ddr3(ramctr_timing *ctrl, int fast_boot,
-		int s3_resume, int me_uma_size)
+static int try_init_dram_ddr3(ramctr_timing *ctrl, int fast_boot, int s3resume, int me_uma_size)
 {
 	if (ctrl->sandybridge)
-		return try_init_dram_ddr3_sandy(ctrl, fast_boot, s3_resume, me_uma_size);
+		return try_init_dram_ddr3_snb(ctrl, fast_boot, s3resume, me_uma_size);
 	else
-		return try_init_dram_ddr3_ivy(ctrl, fast_boot, s3_resume, me_uma_size);
+		return try_init_dram_ddr3_ivb(ctrl, fast_boot, s3resume, me_uma_size);
 }
 
 static void init_dram_ddr3(int min_tck, int s3resume)
 {
-	int me_uma_size;
-	int cbmem_was_inited;
+	int me_uma_size, cbmem_was_inited, fast_boot, err;
 	ramctr_timing ctrl;
-	int fast_boot;
 	spd_raw_data spds[4];
 	struct region_device rdev;
 	ramctr_timing *ctrl_cached;
-	int err;
 	u32 cpu;
 
 	MCHBAR32(SAPMCTL) |= 1;
@@ -298,17 +302,14 @@
 
 	printk(BIOS_DEBUG, "Starting native Platform init\n");
 
-	u32 reg_5d10;
-
 	wait_txt_clear();
 
 	wrmsr(0x000002e6, (msr_t) { .lo = 0, .hi = 0 });
 
-	reg_5d10 = MCHBAR32(0x5d10);	// !!! = 0x00000000
-	if ((pci_read_config16(SOUTHBRIDGE, 0xa2) & 0xa0) == 0x20	/* 0x0004 */
-	    && reg_5d10 && !s3resume) {
-		MCHBAR32(0x5d10) = 0;
-		/* Need reset.  */
+	const u32 sskpd = MCHBAR32(SSKPD);	// !!! = 0x00000000
+	if ((pci_read_config16(SOUTHBRIDGE, 0xa2) & 0xa0) == 0x20 && sskpd && !s3resume) {
+		MCHBAR32(SSKPD) = 0;
+		/* Need reset */
 		system_reset();
 	}
 
@@ -316,10 +317,9 @@
 	early_init_dmi();
 	early_thermal_init();
 
-	/* try to find timings in MRC cache */
-	int cache_not_found = mrc_cache_get_current(MRC_TRAINING_DATA,
-						MRC_CACHE_VERSION, &rdev);
-	if (cache_not_found || (region_device_sz(&rdev) < sizeof(ctrl))) {
+	/* Try to find timings in MRC cache */
+	err = mrc_cache_get_current(MRC_TRAINING_DATA, MRC_CACHE_VERSION, &rdev);
+	if (err || (region_device_sz(&rdev) < sizeof(ctrl))) {
 		if (s3resume) {
 			/* Failed S3 resume, reset to come up cleanly */
 			system_reset();
@@ -329,7 +329,7 @@
 		ctrl_cached = rdev_mmap_full(&rdev);
 	}
 
-	/* verify MRC cache for fast boot */
+	/* Verify MRC cache for fast boot */
 	if (!s3resume && ctrl_cached) {
 		/* Load SPD unique information data. */
 		memset(spds, 0, sizeof(spds));
@@ -353,8 +353,8 @@
 				/* Failed S3 resume, reset to come up cleanly */
 				system_reset();
 			}
-			/* no need to erase bad mrc cache here, it gets overwritten on
-			 * successful boot. */
+			/* No need to erase bad MRC cache here; it gets overwritten on a
+			   successful boot */
 			printk(BIOS_ERR, "Stored timings are invalid !\n");
 			fast_boot = 0;
 		}
@@ -377,7 +377,7 @@
 	}
 
 	if (err) {
-		/* fallback: disable failing channel */
+		/* Fallback: disable failing channel */
 		printk(BIOS_ERR, "RAM training failed, trying fallback.\n");
 		printram("Disable failing channel.\n");
 
@@ -392,7 +392,7 @@
 		/* Reset DDR3 frequency */
 		dram_find_spds_ddr3(spds, &ctrl);
 
-		/* disable failing channel */
+		/* Disable failing channel */
 		disable_channel(&ctrl, GET_ERR_CHANNEL(err));
 
 		err = try_init_dram_ddr3(&ctrl, fast_boot, s3resume, me_uma_size);
diff --git a/src/northbridge/intel/sandybridge/raminit.h b/src/northbridge/intel/sandybridge/raminit.h
index 1939c83..6febfa3 100644
--- a/src/northbridge/intel/sandybridge/raminit.h
+++ b/src/northbridge/intel/sandybridge/raminit.h
@@ -29,4 +29,4 @@
 void mainboard_fill_pei_data(struct pei_data *pei_data);
 int fixup_sandybridge_errata(void);
 
-#endif				/* RAMINIT_H */
+#endif /* RAMINIT_H */
diff --git a/src/northbridge/intel/sandybridge/raminit_common.c b/src/northbridge/intel/sandybridge/raminit_common.c
index 3c3546a..2cb6a83 100644
--- a/src/northbridge/intel/sandybridge/raminit_common.c
+++ b/src/northbridge/intel/sandybridge/raminit_common.c
@@ -26,163 +26,8 @@
 #include "raminit_common.h"
 #include "sandybridge.h"
 
-/* FIXME: no ECC support.  */
-/* FIXME: no support for 3-channel chipsets.  */
-
-/*
- * ### IOSAV command queue notes ###
- *
- * Intel provides a command queue of depth four.
- * Every command is configured by using multiple MCHBAR registers.
- * On executing the command queue, you have to specify its depth (number of commands).
- *
- * The macros for these registers can take some integer parameters, within these bounds:
- *   channel:   [0..1]
- *   index:     [0..3]
- *   lane:      [0..8]
- *
- * Note that these ranges are 'closed': both endpoints are included.
- *
- *
- *
- * ### Register description ###
- *
- * IOSAV_n_SP_CMD_ADDR_ch(channel, index)
- *   Sub-sequence command addresses. Controls the address, bank address and slotrank signals.
- *
- *   Bitfields:
- *   [0..15]    Row / Column Address.
- *   [16..18]   The result of (10 + [16..18]) is the number of valid row bits.
- *                  Note: Value 1 is not implemented. Not that it really matters, though.
- *                        Value 7 is reserved, as the hardware does not support it.
- *   [20..22]   Bank Address.
- *   [24..25]   Rank select. Let's call it "ranksel", as it is mentioned later.
- *
- * IOSAV_n_ADDR_UPD_ch(channel, index)
- *   How the address shall be updated after executing the sub-sequence command.
- *
- *   Bitfields:
- *   [0]        Increment CAS/RAS by 1.
- *   [1]        Increment CAS/RAS by 8.
- *   [2]        Increment bank select by 1.
- *   [3..4]     Increment rank select by 1, 2 or 3.
- *   [5..9]     Known as "addr_wrap". Address bits will wrap around the [addr_wrap..0] range.
- *   [10..11]   LFSR update:
- *                  00: Do not use the LFSR function.
- *                  01: Undefined, treat as Reserved.
- *                  10: Apply LFSR on the [addr_wrap..0] bit range.
- *                  11: Apply LFSR on the [addr_wrap..3] bit range.
- *
- *   [12..15]   Update rate. The number of command runs between address updates. For example:
- *                  0: Update every command run.
- *                  1: Update every second command run. That is, half of the command rate.
- *                  N: Update after N command runs without updates.
- *
- *   [16..17]   LFSR behavior on the deselect cycles (when no sub-seq command is issued):
- *                  0: No change w.r.t. the last issued command.
- *                  1: LFSR XORs with address & command (excluding CS), but does not update.
- *                  2: LFSR XORs with address & command (excluding CS), and updates.
- *
- * IOSAV_n_SP_CMD_CTL_ch(channel, index)
- *   Special command control register. Controls the DRAM command signals.
- *
- *   Bitfields:
- *   [0]        !RAS signal.
- *   [1]        !CAS signal.
- *   [2]        !WE  signal.
- *   [4..7]     CKE, per rank and channel.
- *   [8..11]    ODT, per rank and channel.
- *   [12]       Chip Select mode control.
- *   [13..16]   Chip select, per rank and channel. It works as follows:
- *
- *          entity CS_BLOCK is
- *              port (
- *                  MODE    : in  std_logic;                -- Mode select at [12]
- *                  RANKSEL : in  std_logic_vector(0 to 3); -- Decoded "ranksel" value
- *                  CS_CTL  : in  std_logic_vector(0 to 3); -- Chip select control at [13..16]
- *                  CS_Q    : out std_logic_vector(0 to 3)  -- CS signals
- *              );
- *          end entity CS_BLOCK;
- *
- *          architecture RTL of CS_BLOCK is
- *          begin
- *              if MODE = '1' then
- *                  CS_Q <= not RANKSEL and CS_CTL;
- *              else
- *                  CS_Q <= CS_CTL;
- *              end if;
- *          end architecture RTL;
- *
- *   [17]       Auto Precharge. Only valid when using 10 row bits!
- *
- * IOSAV_n_SUBSEQ_CTL_ch(channel, index)
- *   Sub-sequence parameters. Controls repetititons, delays and data orientation.
- *
- *   Bitfields:
- *   [0..8]     Number of repetitions of the sub-sequence command.
- *   [10..14]   Gap, number of clock-cycles to wait before sending the next command.
- *   [16..24]   Number of clock-cycles to idle between sub-sequence commands.
- *   [26..27]   The direction of the data.
- *                  00: None, does not handle data
- *                  01: Read
- *                  10: Write
- *                  11: Read & Write
- *
- * IOSAV_n_ADDRESS_LFSR_ch(channel, index)
- *   23-bit LFSR state register. It is written into the LFSR when the sub-sequence is loaded,
- *   and then read back from the LFSR when the sub-sequence is done.
- *
- *   Bitfields:
- *   [0..22]    LFSR state.
- *
- * IOSAV_SEQ_CTL_ch(channel)
- *   Control the sequence level in IOSAV: number of sub-sequences, iterations, maintenance...
- *
- *   Bitfields:
- *   [0..7]     Number of full sequence executions. When this field becomes non-zero, then the
- *              sequence starts running immediately. This value is decremented after completing
- *              a full sequence iteration. When it is zero, the sequence is done. No decrement
- *              is done if this field is set to 0xff. This is the "infinite repeat" mode, and
- *              it is manually aborted by clearing this field.
- *
- *   [8..16]    Number of wait cycles after each sequence iteration. This wait's purpose is to
- *              allow performing maintenance in infinite loops. When non-zero, RCOMP, refresh
- *              and ZQXS operations can take place.
- *
- *   [17]       Stop-on-error mode: Whether to stop sequence execution when an error occurs.
- *   [18..19]   Number of sub-sequences. The programmed value is the index of the last sub-seq.
- *   [20]       If set, keep refresh disabled until the next sequence execution.
- *                  DANGER: Refresh must be re-enabled within the (9 * tREFI) period!
- *
- *   [22]       If set, sequence execution will not prevent refresh. This cannot be set when
- *              bit [20] is also set, or was set on the previous sequence. This bit exists so
- *              that the sequence machine can be used as a timer without affecting the memory.
- *
- *   [23]       If set, a output pin is asserted on the first detected error. This output can
- *              be used as a trigger for an oscilloscope or a logic analyzer, which is handy.
- *
- * IOSAV_DATA_CTL_ch(channel)
- *   Data-related controls in IOSAV mode.
- *
- *   Bitfields:
- *   [0..7]     WDB (Write Data Buffer) pattern length: [0..7] = (length / 8) - 1;
- *   [8..15]    WDB read pointer. Points at the data used for IOSAV write transactions.
- *   [16..23]   Comparison pointer. Used to compare data from IOSAV read transactions.
- *   [24]       If set, increment pointers only when micro-breakpoint is active.
- *
- * IOSAV_STATUS_ch(channel)
- *   State of the IOSAV sequence machine. Should be polled after sending an IOSAV sequence.
- *
- *   Bitfields:
- *   [0]        IDLE:  IOSAV is sleeping.
- *   [1]        BUSY:  IOSAV is running a sequence.
- *   [2]        DONE:  IOSAV has completed a sequence.
- *   [3]        ERROR: IOSAV detected an error and stopped on it, when using Stop-on-error.
- *   [4]        PANIC: The refresh machine issued a Panic Refresh, and IOSAV was aborted.
- *   [5]        RCOMP: RComp failure. Unused, consider Reserved.
- *   [6]        Cleared with a new sequence, and set when done and refresh counter is drained.
- *
- */
+/* FIXME: no ECC support */
+/* FIXME: no support for 3-channel chipsets */
 
 /* length:      [1..4] */
 #define IOSAV_RUN_ONCE(length)	((((length) - 1) << 18) | 1)
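
Relating the macro to the IOSAV_SEQ_CTL bitfields described in the comment block removed above,
a worked expansion looks like this (write_mrreg() further down queues three MRS commands and
then runs them with IOSAV_RUN_ONCE(3)):

/*
 * IOSAV_RUN_ONCE(3) = ((3 - 1) << 18) | 1 = 0x80001
 *
 *   bits [18..19] = 2   index of the last sub-sequence, i.e. a three-command sequence
 *   bits [0..7]   = 1   run the full sequence exactly once
 */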
@@ -192,10 +37,11 @@
 	asm volatile ("sfence");
 }
 
-static void toggle_io_reset(void) {
-	/* toggle IO reset bit */
+/* Toggle IO reset bit */
+static void toggle_io_reset(void)
+{
 	u32 r32 = MCHBAR32(MC_INIT_STATE_G);
-	MCHBAR32(MC_INIT_STATE_G) = r32 | 0x20;
+	MCHBAR32(MC_INIT_STATE_G) = r32 |  0x20;
 	udelay(1);
 	MCHBAR32(MC_INIT_STATE_G) = r32 & ~0x20;
 	udelay(1);
@@ -210,43 +56,49 @@
 {
 	u32 reg;
 
-	// enable xover cmd
+	/* Enable xover cmd */
 	reg = 0x4000;
 
-	// enable xover ctl
-	if (rankmap & 0x3)
-		reg |= 0x20000;
+	/* Enable xover ctl */
+	if (rankmap & 0x03)
+		reg |= (1 << 17);
 
-	if (rankmap & 0xc)
-		reg |= 0x4000000;
+	if (rankmap & 0x0c)
+		reg |= (1 << 26);
 
 	return reg;
 }
 
-/* CAS write latency. To be programmed in MR2.
- * See DDR3 SPEC for MR2 documentation. */
+/* CAS write latency. To be programmed in MR2. See DDR3 SPEC for MR2 documentation. */
 u8 get_CWL(u32 tCK)
 {
-	/* Get CWL based on tCK using the following rule: */
+	/* Get CWL based on tCK using the following rule */
 	switch (tCK) {
 	case TCK_1333MHZ:
 		return 12;
+
 	case TCK_1200MHZ:
 	case TCK_1100MHZ:
 		return 11;
+
 	case TCK_1066MHZ:
 	case TCK_1000MHZ:
 		return 10;
+
 	case TCK_933MHZ:
 	case TCK_900MHZ:
 		return 9;
+
 	case TCK_800MHZ:
 	case TCK_700MHZ:
 		return 8;
+
 	case TCK_666MHZ:
 		return 7;
+
 	case TCK_533MHZ:
 		return 6;
+
 	default:
 		return 5;
 	}
@@ -260,22 +112,25 @@
 
 	ctrl->cas_supported = (1 << (MAX_CAS - MIN_CAS + 1)) - 1;
 	valid_dimms = 0;
+
 	FOR_ALL_CHANNELS for (slot = 0; slot < 2; slot++) {
+
 		const dimm_attr *dimm = &dimms->dimm[channel][slot];
 		if (dimm->dram_type != SPD_MEMORY_TYPE_SDRAM_DDR3)
 			continue;
+
 		valid_dimms++;
 
 		/* Find all possible CAS combinations */
 		ctrl->cas_supported &= dimm->cas_supported;
 
 		/* Find the smallest common latencies supported by all DIMMs */
-		ctrl->tCK = MAX(ctrl->tCK, dimm->tCK);
-		ctrl->tAA = MAX(ctrl->tAA, dimm->tAA);
-		ctrl->tWR = MAX(ctrl->tWR, dimm->tWR);
+		ctrl->tCK  = MAX(ctrl->tCK,  dimm->tCK);
+		ctrl->tAA  = MAX(ctrl->tAA,  dimm->tAA);
+		ctrl->tWR  = MAX(ctrl->tWR,  dimm->tWR);
 		ctrl->tRCD = MAX(ctrl->tRCD, dimm->tRCD);
 		ctrl->tRRD = MAX(ctrl->tRRD, dimm->tRRD);
-		ctrl->tRP = MAX(ctrl->tRP, dimm->tRP);
+		ctrl->tRP  = MAX(ctrl->tRP,  dimm->tRP);
 		ctrl->tRAS = MAX(ctrl->tRAS, dimm->tRAS);
 		ctrl->tRFC = MAX(ctrl->tRFC, dimm->tRFC);
 		ctrl->tWTR = MAX(ctrl->tWTR, dimm->tWTR);
@@ -286,8 +141,8 @@
 	}
 
 	if (!ctrl->cas_supported)
-		die("Unsupported DIMM combination. "
-		    "DIMMS do not support common CAS latency");
+		die("Unsupported DIMM combination. DIMMS do not support common CAS latency");
+
 	if (!valid_dimms)
 		die("No valid DIMMs found");
 }
@@ -298,12 +153,12 @@
 	int channel;
 
 	FOR_ALL_CHANNELS {
-		// enable xover clk
+		/* Enable xover clk */
 		reg = get_XOVER_CLK(ctrl->rankmap[channel]);
 		printram("XOVER CLK [%x] = %x\n", GDCRCKPICODE_ch(channel), reg);
 		MCHBAR32(GDCRCKPICODE_ch(channel)) = reg;
 
-		// enable xover ctl & xover cmd
+		/* Enable xover ctl & xover cmd */
 		reg = get_XOVER_CMD(ctrl->rankmap[channel]);
 		printram("XOVER CMD [%x] = %x\n", GDCRCMDPICODING_ch(channel), reg);
 		MCHBAR32(GDCRCMDPICODING_ch(channel)) = reg;
@@ -315,22 +170,21 @@
 	u32 addr, cpu, stretch;
 
 	stretch = ctrl->ref_card_offset[channel];
-	/* ODT stretch: Delay ODT signal by stretch value.
-	 * Useful for multi DIMM setups on the same channel. */
+	/*
+	 * ODT stretch:
+	 * Delay ODT signal by stretch value. Useful for multi DIMM setups on the same channel.
+	 */
 	cpu = cpu_get_cpuid();
 	if (IS_SANDY_CPU(cpu) && IS_SANDY_CPU_C(cpu)) {
 		if (stretch == 2)
 			stretch = 3;
+
 		addr = SCHED_SECOND_CBIT_ch(channel);
-		MCHBAR32_AND_OR(addr, 0xffffc3ff,
-			(stretch << 12) | (stretch << 10));
-		printk(RAM_DEBUG, "OTHP Workaround [%x] = %x\n", addr,
-			MCHBAR32(addr));
+		MCHBAR32_AND_OR(addr, 0xffffc3ff, (stretch << 12) | (stretch << 10));
+		printk(RAM_DEBUG, "OTHP Workaround [%x] = %x\n", addr, MCHBAR32(addr));
 	} else {
-		// OTHP
 		addr = TC_OTHP_ch(channel);
-		MCHBAR32_AND_OR(addr, 0xfff0ffff,
-			(stretch << 16) | (stretch << 18));
+		MCHBAR32_AND_OR(addr, 0xfff0ffff, (stretch << 16) | (stretch << 18));
 		printk(RAM_DEBUG, "OTHP [%x] = %x\n", addr, MCHBAR32(addr));
 	}
 }
@@ -341,38 +195,39 @@
 	int channel;
 
 	FOR_ALL_CHANNELS {
-		// DBP
+		/* BIN parameters */
 		reg = 0;
-		reg |= ctrl->tRCD;
-		reg |= (ctrl->tRP << 4);
-		reg |= (ctrl->CAS << 8);
-		reg |= (ctrl->CWL << 12);
+		reg |= (ctrl->tRCD <<  0);
+		reg |= (ctrl->tRP  <<  4);
+		reg |= (ctrl->CAS  <<  8);
+		reg |= (ctrl->CWL  << 12);
 		reg |= (ctrl->tRAS << 16);
 		printram("DBP [%x] = %x\n", TC_DBP_ch(channel), reg);
 		MCHBAR32(TC_DBP_ch(channel)) = reg;
 
-		// RAP
+		/* Regular access parameters */
 		reg = 0;
-		reg |= ctrl->tRRD;
-		reg |= (ctrl->tRTP << 4);
-		reg |= (ctrl->tCKE << 8);
+		reg |= (ctrl->tRRD <<  0);
+		reg |= (ctrl->tRTP <<  4);
+		reg |= (ctrl->tCKE <<  8);
 		reg |= (ctrl->tWTR << 12);
 		reg |= (ctrl->tFAW << 16);
-		reg |= (ctrl->tWR << 24);
+		reg |= (ctrl->tWR  << 24);
 		reg |= (3 << 30);
 		printram("RAP [%x] = %x\n", TC_RAP_ch(channel), reg);
 		MCHBAR32(TC_RAP_ch(channel)) = reg;
 
-		// OTHP
+		/* Other parameters */
 		addr = TC_OTHP_ch(channel);
 		reg = 0;
-		reg |= ctrl->tXPDLL;
-		reg |= (ctrl->tXP << 5);
+		reg |= (ctrl->tXPDLL << 0);
+		reg |= (ctrl->tXP    << 5);
 		reg |= (ctrl->tAONPD << 8);
 		reg |= 0xa0000;
 		printram("OTHP [%x] = %x\n", addr, reg);
 		MCHBAR32(addr) = reg;
 
+		/* FIXME: This register might as well not exist */
 		MCHBAR32(0x4014 + channel * 0x400) = 0;
 
 		MCHBAR32_OR(addr, 0x00020000);
@@ -380,33 +235,31 @@
 		dram_odt_stretch(ctrl, channel);
 
 		/*
-		 * TC-Refresh timing parameters
-		 * The tREFIx9 field should be programmed to minimum of
-		 * 8.9*tREFI (to allow for possible delays from ZQ or
-		 * isoc) and tRASmax (70us) divided by 1024.
+		 * TC-Refresh timing parameters:
+		 *   The tREFIx9 field should be programmed to the minimum of 8.9 * tREFI
+		 *   (to allow for possible delays from ZQ or isoc) and tRASmax (70us),
+		 *   divided by 1024.
 		 */
 		val32 = MIN((ctrl->tREFI * 89) / 10, (70000 << 8) / ctrl->tCK);
 
-		reg = ((ctrl->tREFI & 0xffff) << 0) |
-			((ctrl->tRFC & 0x1ff) << 16) |
-			(((val32 / 1024) & 0x7f) << 25);
+		reg = ((ctrl->tREFI & 0xffff) <<  0) |
+		      ((ctrl->tRFC  & 0x01ff) << 16) | (((val32 / 1024) & 0x7f) << 25);
+
 		printram("REFI [%x] = %x\n", TC_RFTP_ch(channel), reg);
 		MCHBAR32(TC_RFTP_ch(channel)) = reg;
 
 		MCHBAR32_OR(TC_RFP_ch(channel),  0xff);
 
-		// SRFTP
+		/* Self-refresh timing parameters */
 		reg = 0;
 		val32 = tDLLK;
-		reg = (reg & ~0xfff) | val32;
+		reg   = (reg & ~0x00000fff) | (val32 <<  0);
 		val32 = ctrl->tXSOffset;
-		reg = (reg & ~0xf000) | (val32 << 12);
+		reg   = (reg & ~0x0000f000) | (val32 << 12);
 		val32 = tDLLK - ctrl->tXSOffset;
-		reg = (reg & ~0x3ff0000) | (val32 << 16);
+		reg   = (reg & ~0x03ff0000) | (val32 << 16);
 		val32 = ctrl->tMOD - 8;
-		reg = (reg & ~0xf0000000) | (val32 << 28);
-		printram("SRFTP [%x] = %x\n", TC_SRFTP_ch(channel),
-		       reg);
+		reg   = (reg & ~0xf0000000) | (val32 << 28);
+		printram("SRFTP [%x] = %x\n", TC_SRFTP_ch(channel), reg);
 		MCHBAR32(TC_SRFTP_ch(channel)) = reg;
 	}
 }
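
To make the refresh-timing calculation above concrete, here is a worked example with
illustrative numbers, assuming tCK is in 1/256 ns units and tREFI is in DCLKs (the conventions
used elsewhere in this raminit code):

/*
 * Example: tCK = 384 (1.5 ns) and tREFI = 5200 DCLKs (7.8 us); both values are illustrative.
 *
 *   8.9 * tREFI:  (5200 * 89) / 10      = 46280 DCLKs
 *   tRASmax:      (70000 << 8) / 384    = 46666 DCLKs   (70 us)
 *   val32:        MIN(46280, 46666)     = 46280
 *   tREFIx9:      (46280 / 1024) & 0x7f =    45         -> bits [31..25] of TC_RFTP
 */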
@@ -420,34 +273,32 @@
 		dimm_attr *dimmA, *dimmB;
 		u32 reg = 0;
 
-		if (info->dimm[channel][0].size_mb >=
-		    info->dimm[channel][1].size_mb) {
+		if (info->dimm[channel][0].size_mb >= info->dimm[channel][1].size_mb) {
 			dimmA = &info->dimm[channel][0];
 			dimmB = &info->dimm[channel][1];
-			reg |= 0 << 16;
+			reg |= (0 << 16);
 		} else {
 			dimmA = &info->dimm[channel][1];
 			dimmB = &info->dimm[channel][0];
-			reg |= 1 << 16;
+			reg |= (1 << 16);
 		}
 
 		if (dimmA && (dimmA->ranks > 0)) {
-			reg |= dimmA->size_mb / 256;
-			reg |= (dimmA->ranks - 1) << 17;
+			reg |= (dimmA->size_mb / 256) <<  0;
+			reg |= (dimmA->ranks - 1)     << 17;
 			reg |= (dimmA->width / 8 - 1) << 19;
 		}
 
 		if (dimmB && (dimmB->ranks > 0)) {
-			reg |= (dimmB->size_mb / 256) << 8;
-			reg |= (dimmB->ranks - 1) << 18;
+			reg |= (dimmB->size_mb / 256) <<  8;
+			reg |= (dimmB->ranks - 1)     << 18;
 			reg |= (dimmB->width / 8 - 1) << 20;
 		}
 
-		reg |= 1 << 21; /* rank interleave */
-		reg |= 1 << 22; /* enhanced interleave */
+		reg |= 1 << 21; /* Rank interleave */
+		reg |= 1 << 22; /* Enhanced interleave */
 
-		if ((dimmA && (dimmA->ranks > 0))
-		    || (dimmB && (dimmB->ranks > 0))) {
+		if ((dimmA && (dimmA->ranks > 0)) || (dimmB && (dimmB->ranks > 0))) {
 			ctrl->mad_dimm[channel] = reg;
 		} else {
 			ctrl->mad_dimm[channel] = 0;
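
A worked example of the packing above, with hypothetical DIMMs: 8 GiB dual-rank x8 in slot 0 and
4 GiB single-rank x8 in slot 1.

/*
 * Slot 0 holds the larger DIMM, so it becomes DIMM A and bit 16 stays 0:
 *
 *   (8192 / 256)  <<  0    = 0x000020   DIMM A size
 *   (2 - 1)       << 17    = 0x020000   DIMM A dual rank
 *   (8 / 8 - 1)   << 19    = 0x000000   DIMM A x8
 *   (4096 / 256)  <<  8    = 0x001000   DIMM B size
 *   (1 << 21) | (1 << 22)  = 0x600000   rank + enhanced interleave
 *
 *   ctrl->mad_dimm[channel] = 0x621020
 */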
@@ -459,7 +310,7 @@
 {
 	int channel;
 	FOR_ALL_CHANNELS {
-		MCHBAR32(MAD_DIMM_CH0 + channel * 4) = ctrl->mad_dimm[channel];
+		MCHBAR32(MAD_DIMM(channel)) = ctrl->mad_dimm[channel];
 	}
 }
 
@@ -469,6 +320,7 @@
 	u8 val;
 	reg = 0;
 	val = 0;
+
 	if (training) {
 		ch0size = ctrl->channel_size_mb[0] ? 256 : 0;
 		ch1size = ctrl->channel_size_mb[1] ? 256 : 0;
@@ -481,14 +333,15 @@
 		reg = MCHBAR32(MAD_ZR);
 		val = ch1size / 256;
 		reg = (reg & ~0xff000000) | val << 24;
-		reg = (reg & ~0xff0000) | (2 * val) << 16;
+		reg = (reg & ~0x00ff0000) | (2 * val) << 16;
 		MCHBAR32(MAD_ZR) = reg;
 		MCHBAR32(MAD_CHNL) = 0x24;
+
 	} else {
 		reg = MCHBAR32(MAD_ZR);
 		val = ch0size / 256;
 		reg = (reg & ~0xff000000) | val << 24;
-		reg = (reg & ~0xff0000) | (2 * val) << 16;
+		reg = (reg & ~0x00ff0000) | (2 * val) << 16;
 		MCHBAR32(MAD_ZR) = reg;
 		MCHBAR32(MAD_CHNL) = 0x21;
 	}
@@ -509,13 +362,14 @@
 
 	/* If this is zero, it just means devicetree.cb didn't set it */
 	if (!cfg || cfg->max_mem_clock_mhz == 0) {
+
 		if (CONFIG(NATIVE_RAMINIT_IGNORE_MAX_MEM_FUSES))
 			return TCK_1333MHZ;
 
 		rev = pci_read_config8(HOST_BRIDGE, PCI_DEVICE_ID);
 
 		if ((rev & BASE_REV_MASK) == BASE_REV_SNB) {
-			/* read Capabilities A Register DMFC bits */
+			/* Read Capabilities A Register DMFC bits */
 			reg32 = pci_read_config32(HOST_BRIDGE, CAPID0_A);
 			reg32 &= 0x7;
 
@@ -523,12 +377,12 @@
 			case 7: return TCK_533MHZ;
 			case 6: return TCK_666MHZ;
 			case 5: return TCK_800MHZ;
-			/* reserved: */
+			/* Reserved */
 			default:
 				break;
 			}
 		} else {
-			/* read Capabilities B Register DMFC bits */
+			/* Read Capabilities B Register DMFC bits */
 			reg32 = pci_read_config32(HOST_BRIDGE, CAPID0_B);
 			reg32 = (reg32 >> 4) & 0x7;
 
@@ -540,7 +394,7 @@
 			case 3: return TCK_1066MHZ;
 			case 2: return TCK_1200MHZ;
 			case 1: return TCK_1333MHZ;
-			/* reserved: */
+			/* Reserved */
 			default:
 				break;
 			}
@@ -582,11 +436,9 @@
 
 void dram_memorymap(ramctr_timing *ctrl, int me_uma_size)
 {
-	u32 reg, val, reclaim;
-	u32 tom, gfxstolen, gttsize;
-	size_t tsegsize, mmiosize, toludbase, touudbase, gfxstolenbase, gttbase,
-	    tsegbase, mestolenbase;
-	size_t tsegbasedelta, remapbase, remaplimit;
+	u32 reg, val, reclaim, tom, gfxstolen, gttsize;
+	size_t tsegbase, toludbase, remapbase, gfxstolenbase, mmiosize, gttbase;
+	size_t tsegsize, touudbase, remaplimit, mestolenbase, tsegbasedelta;
 	uint16_t ggc;
 
 	mmiosize = get_mmio_size();
@@ -594,10 +446,10 @@
 	ggc = pci_read_config16(HOST_BRIDGE, GGC);
 	if (!(ggc & 2)) {
 		gfxstolen = ((ggc >> 3) & 0x1f) * 32;
-		gttsize = ((ggc >> 8) & 0x3);
+		gttsize   = ((ggc >> 8) & 0x3);
 	} else {
 		gfxstolen = 0;
-		gttsize = 0;
+		gttsize   = 0;
 	}
 
 	tsegsize = CONFIG_SMM_TSEG_SIZE >> 20;
@@ -606,14 +458,14 @@
 
 	mestolenbase = tom - me_uma_size;
 
-	toludbase = MIN(4096 - mmiosize + gfxstolen + gttsize + tsegsize,
-			tom - me_uma_size);
+	toludbase = MIN(4096 - mmiosize + gfxstolen + gttsize + tsegsize, tom - me_uma_size);
+
 	gfxstolenbase = toludbase - gfxstolen;
 	gttbase = gfxstolenbase - gttsize;
 
 	tsegbase = gttbase - tsegsize;
 
-	// Round tsegbase down to nearest address aligned to tsegsize
+	/* Round tsegbase down to nearest address aligned to tsegsize */
 	tsegbasedelta = tsegbase & (tsegsize - 1);
 	tsegbase &= ~(tsegsize - 1);
 
@@ -621,24 +473,23 @@
 	gfxstolenbase -= tsegbasedelta;
 	toludbase -= tsegbasedelta;
 
-	// Test if it is possible to reclaim a hole in the RAM addressing
+	/* Test if it is possible to reclaim a hole in the RAM addressing */
 	if (tom - me_uma_size > toludbase) {
-		// Reclaim is possible
-		reclaim = 1;
-		remapbase = MAX(4096, tom - me_uma_size);
-		remaplimit =
-		    remapbase + MIN(4096, tom - me_uma_size) - toludbase - 1;
-		touudbase = remaplimit + 1;
+		/* Reclaim is possible */
+		reclaim    = 1;
+		remapbase  = MAX(4096, tom - me_uma_size);
+		remaplimit = remapbase + MIN(4096, tom - me_uma_size) - toludbase - 1;
+		touudbase  = remaplimit + 1;
 	} else {
 		// Reclaim not possible
-		reclaim = 0;
+		reclaim   = 0;
 		touudbase = tom - me_uma_size;
 	}
 
-	// Update memory map in pci-e configuration space
+	/* Update memory map in PCIe configuration space */
 	printk(BIOS_DEBUG, "Update PCI-E configuration space:\n");
 
-	// TOM (top of memory)
+	/* TOM (top of memory) */
 	reg = pci_read_config32(HOST_BRIDGE, TOM);
 	val = tom & 0xfff;
 	reg = (reg & ~0xfff00000) | (val << 20);
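
A worked pass through the address-map calculation above, with hypothetical values (all in MiB):

/*
 * tom = 8192 (8 GiB), me_uma_size = 0, mmiosize = 2048, gfxstolen = 32, gttsize = 2,
 * tsegsize = 8 (all hypothetical).
 *
 *   toludbase  = MIN(4096 - 2048 + 32 + 2 + 8, 8192)  = 2090
 *   gttbase    = 2090 - 32 - 2                        = 2056
 *   tsegbase   = 2056 - 8                             = 2048  (already 8 MiB aligned)
 *
 *   reclaim    = 1  (8192 > 2090)
 *   remapbase  = MAX(4096, 8192)                      = 8192
 *   remaplimit = 8192 + MIN(4096, 8192) - 2090 - 1    = 10197
 *   touudbase  = 10198
 *
 * The 2090..4095 MiB hole under the MMIO window becomes accessible again at 8192..10197 MiB.
 */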
@@ -651,21 +502,21 @@
 	printk(BIOS_DEBUG, "PCI(0, 0, 0)[%x] = %x\n", TOM + 4, reg);
 	pci_write_config32(HOST_BRIDGE, TOM + 4, reg);
 
-	// TOLUD (top of low used dram)
+	/* TOLUD (Top Of Low Usable DRAM) */
 	reg = pci_read_config32(HOST_BRIDGE, TOLUD);
 	val = toludbase & 0xfff;
 	reg = (reg & ~0xfff00000) | (val << 20);
 	printk(BIOS_DEBUG, "PCI(0, 0, 0)[%x] = %x\n", TOLUD, reg);
 	pci_write_config32(HOST_BRIDGE, TOLUD, reg);
 
-	// TOUUD LSB (top of upper usable dram)
+	/* TOUUD LSB (Top Of Upper Usable DRAM) */
 	reg = pci_read_config32(HOST_BRIDGE, TOUUD);
 	val = touudbase & 0xfff;
 	reg = (reg & ~0xfff00000) | (val << 20);
 	printk(BIOS_DEBUG, "PCI(0, 0, 0)[%x] = %x\n", TOUUD, reg);
 	pci_write_config32(HOST_BRIDGE, TOUUD, reg);
 
-	// TOUUD MSB
+	/* TOUUD MSB */
 	reg = pci_read_config32(HOST_BRIDGE, TOUUD + 4);
 	val = touudbase & 0xfffff000;
 	reg = (reg & ~0x000fffff) | (val >> 12);
@@ -673,29 +524,29 @@
 	pci_write_config32(HOST_BRIDGE, TOUUD + 4, reg);
 
 	if (reclaim) {
-		// REMAP BASE
-		pci_write_config32(HOST_BRIDGE, REMAPBASE, remapbase << 20);
+		/* REMAP BASE */
+		pci_write_config32(HOST_BRIDGE, REMAPBASE,     remapbase << 20);
 		pci_write_config32(HOST_BRIDGE, REMAPBASE + 4, remapbase >> 12);
 
-		// REMAP LIMIT
-		pci_write_config32(HOST_BRIDGE, REMAPLIMIT, remaplimit << 20);
+		/* REMAP LIMIT */
+		pci_write_config32(HOST_BRIDGE, REMAPLIMIT,     remaplimit << 20);
 		pci_write_config32(HOST_BRIDGE, REMAPLIMIT + 4, remaplimit >> 12);
 	}
-	// TSEG
+	/* TSEG */
 	reg = pci_read_config32(HOST_BRIDGE, TSEGMB);
 	val = tsegbase & 0xfff;
 	reg = (reg & ~0xfff00000) | (val << 20);
 	printk(BIOS_DEBUG, "PCI(0, 0, 0)[%x] = %x\n", TSEGMB, reg);
 	pci_write_config32(HOST_BRIDGE, TSEGMB, reg);
 
-	// GFX stolen memory
+	/* GFX stolen memory */
 	reg = pci_read_config32(HOST_BRIDGE, BDSM);
 	val = gfxstolenbase & 0xfff;
 	reg = (reg & ~0xfff00000) | (val << 20);
 	printk(BIOS_DEBUG, "PCI(0, 0, 0)[%x] = %x\n", BDSM, reg);
 	pci_write_config32(HOST_BRIDGE, BDSM, reg);
 
-	// GTT stolen memory
+	/* GTT stolen memory */
 	reg = pci_read_config32(HOST_BRIDGE, BGSM);
 	val = gttbase & 0xfff;
 	reg = (reg & ~0xfff00000) | (val << 20);
@@ -709,7 +560,7 @@
 		printk(BIOS_DEBUG, "PCI(0, 0, 0)[%x] = %x\n", MESEG_MASK + 4, reg);
 		pci_write_config32(HOST_BRIDGE, MESEG_MASK + 4, reg);
 
-		// ME base
+		/* ME base */
 		reg = pci_read_config32(HOST_BRIDGE, MESEG_BASE);
 		val = mestolenbase & 0xfff;
 		reg = (reg & ~0xfff00000) | (val << 20);
@@ -722,12 +573,12 @@
 		printk(BIOS_DEBUG, "PCI(0, 0, 0)[%x] = %x\n", MESEG_BASE + 4, reg);
 		pci_write_config32(HOST_BRIDGE, MESEG_BASE + 4, reg);
 
-		// ME mask
+		/* ME mask */
 		reg = pci_read_config32(HOST_BRIDGE, MESEG_MASK);
 		val = (0x80000 - me_uma_size) & 0xfff;
 		reg = (reg & ~0xfff00000) | (val << 20);
-		reg = reg | ME_STLEN_EN;	// set ME memory enable
-		reg = reg | MELCK;		// set lockbit on ME mem
+		reg = reg | ME_STLEN_EN;	/* Set ME memory enable */
+		reg = reg | MELCK;		/* Set lock bit on ME mem */
 		printk(BIOS_DEBUG, "PCI(0, 0, 0)[%x] = %x\n", MESEG_MASK, reg);
 		pci_write_config32(HOST_BRIDGE, MESEG_MASK, reg);
 	}
@@ -745,21 +596,25 @@
 {
 	int channel, slotrank;
 
-	/* choose a populated channel.  */
+	/* Choose a populated channel */
 	channel = (ctrl->rankmap[0]) ? 0 : 1;
 
 	wait_for_iosav(channel);
 
-	/* choose a populated rank.  */
+	/* Choose a populated rank */
 	slotrank = (ctrl->rankmap[channel] & 1) ? 0 : 2;
 
 	/* DRAM command ZQCS */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x80c01;
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x0f003;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0x80c01;
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
-	// execute command queue - why is bit 22 set here?!
+	/*
+	 * Execute command queue - why is bit 22 set here?!
+	 *
+	 * This is actually using the IOSAV state machine as a timer, so refresh is allowed.
+	 */
 	MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = (1 << 22) | IOSAV_RUN_ONCE(1);
 
 	wait_for_iosav(channel);
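
Decoding the sequence-control value above against the IOSAV_SEQ_CTL description that was removed
further up:

/*
 *   (1 << 22)           sequence execution does not block refresh (timer-style use of IOSAV)
 *   IOSAV_RUN_ONCE(1)   one sub-sequence, executed once
 *
 * So the single ZQCS is issued once while refresh stays enabled, matching the comment.
 */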
@@ -770,101 +625,99 @@
 	u32 reg;
 	int channel;
 
-	while (!(MCHBAR32(RCOMP_TIMER) & 0x10000));
+	while (!(MCHBAR32(RCOMP_TIMER) & (1 << 16)))
+		;
 	do {
 		reg = MCHBAR32(IOSAV_STATUS_ch(0));
 	} while ((reg & 0x14) == 0);
 
-	// Set state of memory controller
+	/* Set state of memory controller */
 	reg = 0x112;
 	MCHBAR32(MC_INIT_STATE_G) = reg;
 	MCHBAR32(MC_INIT_STATE) = 0;
-	reg |= 2;		//ddr reset
+	reg |= 2;		/* DDR reset */
 	MCHBAR32(MC_INIT_STATE_G) = reg;
 
-	// Assert dimm reset signal
-	MCHBAR32_AND(MC_INIT_STATE_G, ~0x2);
+	/* Assert DIMM reset signal */
+	MCHBAR32_AND(MC_INIT_STATE_G, ~2);
 
-	// Wait 200us
+	/* Wait 200us */
 	udelay(200);
 
-	// Deassert dimm reset signal
+	/* Deassert DIMM reset signal */
 	MCHBAR32_OR(MC_INIT_STATE_G, 2);
 
-	// Wait 500us
+	/* Wait 500us */
 	udelay(500);
 
-	// Enable DCLK
+	/* Enable DCLK */
 	MCHBAR32_OR(MC_INIT_STATE_G, 4);
 
-	// XXX Wait 20ns
+	/* XXX Wait 20ns */
 	udelay(1);
 
 	FOR_ALL_CHANNELS {
-		// Set valid rank CKE
+		/* Set valid rank CKE */
 		reg = ctrl->rankmap[channel];
 		MCHBAR32(MC_INIT_STATE_ch(channel)) = reg;
 
-		// Wait 10ns for ranks to settle
-		//udelay(0.01);
+		/* Wait 10ns for ranks to settle */
+		// udelay(0.01);
 
 		reg = (reg & ~0xf0) | (ctrl->rankmap[channel] << 4);
 		MCHBAR32(MC_INIT_STATE_ch(channel)) = reg;
 
-		// Write reset using a NOP
+		/* Write reset using a NOP */
 		write_reset(ctrl);
 	}
 }
 
 static odtmap get_ODT(ramctr_timing *ctrl, u8 rank, int channel)
 {
-	/* Get ODT based on rankmap: */
-	int dimms_per_ch = (ctrl->rankmap[channel] & 1)
-					+ ((ctrl->rankmap[channel] >> 2) & 1);
+	/* Get ODT based on rankmap */
+	int dimms_per_ch = (ctrl->rankmap[channel] & 1) + ((ctrl->rankmap[channel] >> 2) & 1);
 
 	if (dimms_per_ch == 1) {
-		return (const odtmap){60, 60};
+		return (const odtmap){60,  60};
 	} else {
 		return (const odtmap){120, 30};
 	}
 }
 
-static void write_mrreg(ramctr_timing *ctrl, int channel, int slotrank,
-			int reg, u32 val)
+static void write_mrreg(ramctr_timing *ctrl, int channel, int slotrank, int reg, u32 val)
 {
 	wait_for_iosav(channel);
 
 	if (ctrl->rank_mirror[channel][slotrank]) {
 		/* DDR3 Rank1 Address mirror
-		 * swap the following pins:
-		 * A3<->A4, A5<->A6, A7<->A8, BA0<->BA1 */
+		   swap the following pins:
+		   A3<->A4, A5<->A6, A7<->A8, BA0<->BA1 */
 		reg = ((reg >> 1) & 1) | ((reg << 1) & 2);
-		val = (val & ~0x1f8) | ((val >> 1) & 0xa8)
-		    | ((val & 0xa8) << 1);
+		val = (val & ~0x1f8) | ((val >> 1) & 0xa8) | ((val & 0xa8) << 1);
 	}
 
 	/* DRAM command MRS */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f000;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x41001;
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x0f000;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0x41001;
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
 		(slotrank << 24) | (reg << 20) | val | 0x60000;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
 	/* DRAM command MRS */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f000;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x41001;
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f000;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x41001;
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) =
 		(slotrank << 24) | (reg << 20) | val | 0x60000;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0;
 
 	/* DRAM command MRS */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x0f000;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x1001 | (ctrl->tMOD << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x0f000;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) = 0x1001 | (ctrl->tMOD << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
 		(slotrank << 24) | (reg << 20) | val | 0x60000;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0;
 
-	// execute command queue
+	/* Execute command queue */
 	MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(3);
 }
 
@@ -877,7 +730,7 @@
 	/* DLL Reset - self clearing - set after CLK frequency has been changed */
 	mr0reg = 0x100;
 
-	// Convert CAS to MCH register friendly
+	/* Convert CAS to MCH register friendly */
 	if (ctrl->CAS < 12) {
 		mch_cas = (u16) ((ctrl->CAS - 4) << 1);
 	} else {
@@ -885,15 +738,15 @@
 		mch_cas = ((mch_cas << 1) | 0x1);
 	}
 
-	// Convert tWR to MCH register friendly
+	/* Convert tWR to MCH register friendly */
 	mch_wr = mch_wr_t[ctrl->tWR - 5];
 
-	mr0reg = (mr0reg & ~0x4) | ((mch_cas & 0x1) << 2);
-	mr0reg = (mr0reg & ~0x70) | ((mch_cas & 0xe) << 3);
-	mr0reg = (mr0reg & ~0xe00) | (mch_wr << 9);
+	mr0reg = (mr0reg & ~0x0004) | ((mch_cas & 0x1) << 2);
+	mr0reg = (mr0reg & ~0x0070) | ((mch_cas & 0xe) << 3);
+	mr0reg = (mr0reg & ~0x0e00) |  (mch_wr << 9);
 
-	// Precharge PD - Fast (desktop) 0x1 or slow (mobile) 0x0 - mostly power-saving feature
-	mr0reg = (mr0reg & ~0x1000) | (!is_mobile << 12);
+	/* Precharge PD - Fast (desktop) 1 or slow (mobile) 0 - mostly power-saving feature */
+	mr0reg = (mr0reg & ~(1 << 12)) | (!is_mobile << 12);
 	return mr0reg;
 }
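
A worked example of the CAS encoding above, taking a hypothetical CL of 9 on a desktop board:

/*
 *   mch_cas          = (9 - 4) << 1   = 0b1010
 *   MR0 bit 2        = mch_cas & 1    = 0
 *   MR0 bits [6..4]  = mch_cas[3..1]  = 0b101
 *
 * This is the DDR3 MR0 encoding for CL9. Together with the DLL reset bit (0x100) and the fast
 * precharge-PD bit (1 << 12) for desktop, mr0reg is 0x1150 before the tWR value supplied by
 * mch_wr_t[] is merged into bits [11..9].
 */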
 
@@ -923,7 +776,7 @@
 	u32 mr1reg;
 
 	odt = get_ODT(ctrl, rank, channel);
-	mr1reg = 0x2;
+	mr1reg = 2;
 
 	mr1reg |= encode_odt(odt.rttnom);
 
@@ -952,7 +805,7 @@
 	srt = ctrl->extended_temperature_range && !ctrl->auto_self_refresh;
 
 	mr2reg = 0;
-	mr2reg = (mr2reg & ~0x7) | pasr;
+	mr2reg = (mr2reg & ~0x07) | pasr;
 	mr2reg = (mr2reg & ~0x38) | (cwl << 3);
 	mr2reg = (mr2reg & ~0x40) | (ctrl->auto_self_refresh << 6);
 	mr2reg = (mr2reg & ~0x80) | (srt << 7);
@@ -973,42 +826,41 @@
 
 	FOR_ALL_POPULATED_CHANNELS {
 		FOR_ALL_POPULATED_RANKS {
-			// MR2
+			/* MR2 */
 			dram_mr2(ctrl, slotrank, channel);
 
-			// MR3
+			/* MR3 */
 			dram_mr3(ctrl, slotrank, channel);
 
-			// MR1
+			/* MR1 */
 			dram_mr1(ctrl, slotrank, channel);
 
-			// MR0
+			/* MR0 */
 			dram_mr0(ctrl, slotrank, channel);
 		}
 	}
 
 	/* DRAM command NOP */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL(0)) = 0x7;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL(0)) = 0xf1001;
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL(0)) = 0x7;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL(0)) = 0xf1001;
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR(0)) = 0x60002;
-	MCHBAR32(IOSAV_n_ADDR_UPD(0)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE(0)) = 0;
 
 	/* DRAM command ZQCL */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL(1)) = 0x1f003;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL(1)) = 0x1901001;
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL(1)) = 0x1f003;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL(1)) = 0x1901001;
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR(1)) = 0x60400;
-	MCHBAR32(IOSAV_n_ADDR_UPD(1)) = 0x288;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE(1)) = 0x288;
 
-	// execute command queue on all channels? Why isn't bit 0 set here?
-	MCHBAR32(IOSAV_SEQ_CTL) = 0x40004;
+	/* Execute command queue on all channels. Do it four times. */
+	MCHBAR32(IOSAV_SEQ_CTL) = (1 << 18) | 4;
 
-	// Drain
 	FOR_ALL_CHANNELS {
-		// Wait for ref drained
+		/* Wait for ref drained */
 		wait_for_iosav(channel);
 	}
 
-	// Refresh enable
+	/* Refresh enable */
 	MCHBAR32_OR(MC_INIT_STATE_G, 8);
 
 	FOR_ALL_POPULATED_CHANNELS {
@@ -1018,20 +870,19 @@
 
 		slotrank = (ctrl->rankmap[channel] & 1) ? 0 : 2;
 
-		// Drain
+		/* Drain */
 		wait_for_iosav(channel);
 
 		/* DRAM command ZQCS */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x659001;
-		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
-			(slotrank << 24) | 0x60000;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x3e0;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x0f003;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0x659001;
+		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x3e0;
 
-		// execute command queue
+		/* Execute command queue */
 		MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
 
-		// Drain
+		/* Drain */
 		wait_for_iosav(channel);
 	}
 }
@@ -1063,42 +914,41 @@
 			break;
 		case 1:
 			pi_coding_ctrl[slot] =
-			    ctrl->timings[channel][2 * slot + 0].pi_coding +
-			    full_shift;
+			    ctrl->timings[channel][2 * slot + 0].pi_coding + full_shift;
 			break;
 		case 2:
 			pi_coding_ctrl[slot] =
-			    ctrl->timings[channel][2 * slot + 1].pi_coding +
-			    full_shift;
+			    ctrl->timings[channel][2 * slot + 1].pi_coding + full_shift;
 			break;
 		case 3:
 			pi_coding_ctrl[slot] =
 			    (ctrl->timings[channel][2 * slot].pi_coding +
-			    ctrl->timings[channel][2 * slot + 1].pi_coding) / 2 +
-			    full_shift;
+			    ctrl->timings[channel][2 * slot + 1].pi_coding) / 2 + full_shift;
 			break;
 		}
 
-	/* enable CMD XOVER */
+	/* Enable CMD XOVER */
 	reg32 = get_XOVER_CMD(ctrl->rankmap[channel]);
-	reg32 |= ((pi_coding_ctrl[0] & 0x3f) << 6) | ((pi_coding_ctrl[0] & 0x40) << 9);
+	reg32 |= (pi_coding_ctrl[0] & 0x3f) <<  6;
+	reg32 |= (pi_coding_ctrl[0] & 0x40) <<  9;
 	reg32 |= (pi_coding_ctrl[1] & 0x7f) << 18;
 	reg32 |= (full_shift & 0x3f) | ((full_shift & 0x40) << 6);
 
 	MCHBAR32(GDCRCMDPICODING_ch(channel)) = reg32;
 
-	/* enable CLK XOVER */
+	/* Enable CLK XOVER */
 	reg_pi_code = get_XOVER_CLK(ctrl->rankmap[channel]);
 	reg_logic_delay = 0;
 
 	FOR_ALL_POPULATED_RANKS {
-		int shift =
-		    ctrl->timings[channel][slotrank].pi_coding + full_shift;
+		int shift = ctrl->timings[channel][slotrank].pi_coding + full_shift;
 		int offset_pi_code;
 		if (shift < 0)
 			shift = 0;
+
 		offset_pi_code = ctrl->pi_code_offset + shift;
-		/* set CLK phase shift */
+
+		/* Set CLK phase shift */
 		reg_pi_code |= (offset_pi_code & 0x3f) << (6 * slotrank);
 		reg_logic_delay |= ((offset_pi_code >> 6) & 1) << slotrank;
 	}
@@ -1112,11 +962,10 @@
 	reg_roundtrip_latency = 0;
 
 	FOR_ALL_POPULATED_RANKS {
-		int post_timA_min_high = 7, post_timA_max_high = 0;
-		int pre_timA_min_high = 7, pre_timA_max_high = 0;
+		int post_timA_min_high = 7, pre_timA_min_high = 7;
+		int post_timA_max_high = 0, pre_timA_max_high = 0;
 		int shift_402x = 0;
-		int shift =
-		    ctrl->timings[channel][slotrank].pi_coding + full_shift;
+		int shift = ctrl->timings[channel][slotrank].pi_coding + full_shift;
 
 		if (shift < 0)
 			shift = 0;
@@ -1139,6 +988,7 @@
 		if (pre_timA_max_high - pre_timA_min_high <
 		    post_timA_max_high - post_timA_min_high)
 			shift_402x = +1;
+
 		else if (pre_timA_max_high - pre_timA_min_high >
 			 post_timA_max_high - post_timA_min_high)
 			shift_402x = -1;
@@ -1146,6 +996,7 @@
 		reg_io_latency |=
 		    (ctrl->timings[channel][slotrank].io_latency + shift_402x -
 		     post_timA_min_high) << (4 * slotrank);
+
 		reg_roundtrip_latency |=
 		    (ctrl->timings[channel][slotrank].roundtrip_latency +
 		     shift_402x) << (8 * slotrank);
@@ -1187,45 +1038,45 @@
 	wait_for_iosav(channel);
 
 	/* DRAM command MRS
-	 * write MR3 MPR enable
-	 * in this mode only RD and RDA are allowed
-	 * all reads return a predefined pattern */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = (0xc01 | (ctrl->tMOD << 16));
+	   write MR3 MPR enable
+	   in this mode only RD and RDA are allowed
+	   all reads return a predefined pattern */
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f000;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = (0xc01 | (ctrl->tMOD << 16));
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x360004;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
 	/* DRAM command RD */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4040c01;
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f105;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x4040c01;
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24);
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0;
 
 	/* DRAM command RD */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x100f | ((ctrl->CAS + 36) << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f105;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) = 0x100f | ((ctrl->CAS + 36) << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24) | 0x60000;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0;
 
 	/* DRAM command MRS
-	 * write MR3 MPR disable */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0xc01 | (ctrl->tMOD << 16);
+	   write MR3 MPR disable */
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f000;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) = 0xc01 | (ctrl->tMOD << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x360000;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0;
 
-	// execute command queue
+	/* Execute command queue */
 	MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 	wait_for_iosav(channel);
 }
 
-static int does_lane_work(ramctr_timing *ctrl, int channel, int slotrank,
-			  int lane)
+static int does_lane_work(ramctr_timing *ctrl, int channel, int slotrank, int lane)
 {
 	u32 timA = ctrl->timings[channel][slotrank].lanes[lane].timA;
-	return ((MCHBAR32(lane_base[lane] + GDCRTRAININGRESULT(channel, (timA / 32) & 1)) >>
-		(timA % 32)) & 1);
+
+	return (MCHBAR32(lane_base[lane] +
+		GDCRTRAININGRESULT(channel, (timA / 32) & 1)) >> (timA % 32)) & 1;
 }
 
 struct run {
@@ -1253,24 +1104,23 @@
 		}
 	if (bl == 0) {
 		ret.middle = sz / 2;
-		ret.start = 0;
-		ret.end = sz;
+		ret.start  = 0;
+		ret.end    = sz;
 		ret.length = sz;
-		ret.all = 1;
+		ret.all    = 1;
 		return ret;
 	}
 
-	ret.start = bs % sz;
-	ret.end = (bs + bl - 1) % sz;
+	ret.start  = bs % sz;
+	ret.end    = (bs + bl - 1) % sz;
 	ret.middle = (bs + (bl - 1) / 2) % sz;
 	ret.length = bl;
-	ret.all = 0;
+	ret.all    = 0;
 
 	return ret;
 }
 
-static void discover_timA_coarse(ramctr_timing *ctrl, int channel,
-				 int slotrank, int *upperA)
+static void discover_timA_coarse(ramctr_timing *ctrl, int channel, int slotrank, int *upperA)
 {
 	int timA;
 	int statistics[NUM_LANES][128];
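The statistics gathered by this and the other discovery routines are all evaluated the same way: get_longest_zero_run() finds the longest run of passing (zero) entries, and the caller programs the middle of that run. A small worked example, for illustration only (not part of the patch), using the formulas above:

	/*
	 * For the 8-entry array { 1, 0, 0, 0, 0, 0, 1, 1 }, the longest zero
	 * run covers indices 1..5, i.e. bs = 1 and bl = 5, which yields:
	 *   start = 1, end = 5, middle = 3, length = 5, all = 0
	 */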
@@ -1285,8 +1135,7 @@
 		test_timA(ctrl, channel, slotrank);
 
 		FOR_ALL_LANES {
-			statistics[lane][timA] =
-			    !does_lane_work(ctrl, channel, slotrank, lane);
+			statistics[lane][timA] = !does_lane_work(ctrl, channel, slotrank, lane);
 		}
 	}
 	FOR_ALL_LANES {
@@ -1295,13 +1144,13 @@
 		upperA[lane] = rn.end;
 		if (upperA[lane] < rn.middle)
 			upperA[lane] += 128;
+
 		printram("timA: %d, %d, %d: 0x%02x-0x%02x-0x%02x\n",
 			 channel, slotrank, lane, rn.start, rn.middle, rn.end);
 	}
 }
 
-static void discover_timA_fine(ramctr_timing *ctrl, int channel, int slotrank,
-			       int *upperA)
+static void discover_timA_fine(ramctr_timing *ctrl, int channel, int slotrank, int *upperA)
 {
 	int timA_delta;
 	int statistics[NUM_LANES][51];
@@ -1310,16 +1159,18 @@
 	memset(statistics, 0, sizeof(statistics));
 
 	for (timA_delta = -25; timA_delta <= 25; timA_delta++) {
-		FOR_ALL_LANES ctrl->timings[channel][slotrank].lanes[lane].
-		    timA = upperA[lane] + timA_delta + 0x40;
+
+		FOR_ALL_LANES {
+			ctrl->timings[channel][slotrank].lanes[lane].timA
+				= upperA[lane] + timA_delta + 0x40;
+		}
 		program_timings(ctrl, channel);
 
 		for (i = 0; i < 100; i++) {
 			test_timA(ctrl, channel, slotrank);
 			FOR_ALL_LANES {
 				statistics[lane][timA_delta + 25] +=
-					does_lane_work(ctrl, channel, slotrank,
-						lane);
+					does_lane_work(ctrl, channel, slotrank, lane);
 			}
 		}
 	}
@@ -1329,18 +1180,19 @@
 		for (last_zero = -25; last_zero <= 25; last_zero++)
 			if (statistics[lane][last_zero + 25])
 				break;
+
 		last_zero--;
 		for (first_all = -25; first_all <= 25; first_all++)
 			if (statistics[lane][first_all + 25] == 100)
 				break;
 
-		printram("lane %d: %d, %d\n", lane, last_zero,
-		       first_all);
+		printram("lane %d: %d, %d\n", lane, last_zero, first_all);
 
 		ctrl->timings[channel][slotrank].lanes[lane].timA =
-		    (last_zero + first_all) / 2 + upperA[lane];
+			(last_zero + first_all) / 2 + upperA[lane];
+
 		printram("Aval: %d, %d, %d: %x\n", channel, slotrank,
-		       lane, ctrl->timings[channel][slotrank].lanes[lane].timA);
+			lane, ctrl->timings[channel][slotrank].lanes[lane].timA);
 	}
 }
 
@@ -1348,13 +1200,16 @@
 {
 	int works[NUM_LANES];
 	int lane;
+
 	while (1) {
 		int all_works = 1, some_works = 0;
+
 		program_timings(ctrl, channel);
 		test_timA(ctrl, channel, slotrank);
+
 		FOR_ALL_LANES {
-			works[lane] =
-			    !does_lane_work(ctrl, channel, slotrank, lane);
+			works[lane] = !does_lane_work(ctrl, channel, slotrank, lane);
+
 			if (works[lane])
 				some_works = 1;
 			else
@@ -1362,6 +1217,7 @@
 		}
 		if (all_works)
 			return 0;
+
 		if (!some_works) {
 			if (ctrl->timings[channel][slotrank].roundtrip_latency < 2) {
 				printk(BIOS_EMERG, "402x discovery failed (1): %d, %d\n",
@@ -1374,6 +1230,7 @@
 		}
 		ctrl->timings[channel][slotrank].io_latency += 2;
 		printram("4028 += 2;\n");
+
 		if (ctrl->timings[channel][slotrank].io_latency >= 0x10) {
 			printk(BIOS_EMERG, "402x discovery failed (2): %d, %d\n",
 			       channel, slotrank);
@@ -1417,15 +1274,17 @@
 	struct timA_minmax post;
 	int shift_402x = 0;
 
-	/* Get changed maxima. */
+	/* Get changed maxima */
 	pre_timA_change(ctrl, channel, slotrank, &post);
 
 	if (mnmx->timA_max_high - mnmx->timA_min_high <
 	    post.timA_max_high - post.timA_min_high)
 		shift_402x = +1;
+
 	else if (mnmx->timA_max_high - mnmx->timA_min_high >
 		 post.timA_max_high - post.timA_min_high)
 		shift_402x = -1;
+
 	else
 		shift_402x = 0;
 
@@ -1435,17 +1294,21 @@
 	printram("4028 += %d;\n", shift_402x);
 }
 
-/* Compensate the skew between DQS and DQs.
+/*
+ * Compensate the skew between DQS and DQs.
+ *
  * To ease PCB design, a small skew between Data Strobe signals and Data Signals is allowed.
  * The controller has to measure and compensate this skew for every byte-lane. By delaying
- * either all DQs signals or DQS signal, a full phase shift can be introduced. It is assumed
+ * either all DQ signals or the DQS signal, a full phase shift can be introduced. It is assumed
  * that one byte-lane's DQs signals have the same routing delay.
  *
  * To measure the actual skew, the DRAM is placed in "read leveling" mode. In read leveling
  * mode the DRAM-chip outputs an alternating periodic pattern. The memory controller iterates
  * over all possible values to do a full phase shift and issues read commands. With DQS and
- * DQs in phase the data read is expected to alternate on every byte:
+ * DQ in phase the data being read is expected to alternate on every byte:
+ *
  *   0xFF 0x00 0xFF ...
+ *
  * Once the controller has detected this pattern a bit in the result register is set for the
  * current phase shift.
  */
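As a conceptual aid only (this helper is hypothetical and not part of the patch; the hardware performs the detection itself and reports it through the training result bits read by does_lane_work()), the "pattern detected" condition described above amounts to a check like this:

	static int burst_alternates(const u8 *buf, size_t len)
	{
		size_t i;

		for (i = 0; i < len; i++)
			if (buf[i] != ((i & 1) ? 0x00 : 0xff))
				return 0;

		return 1;
	}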
@@ -1462,12 +1325,12 @@
 		wait_for_iosav(channel);
 
 		/* DRAM command PREA */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f002;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | (ctrl->tRP << 16);
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f002;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0xc01 | (ctrl->tRP << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60400;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
-		// execute command queue
+		/* Execute command queue */
 		MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
 
 		MCHBAR32(GDCRTRAININGMOD) = (slotrank << 2) | 0x8001;
@@ -1519,7 +1382,8 @@
 		pre_timA_change(ctrl, channel, slotrank, &mnmx);
 
 		FOR_ALL_LANES {
-			ctrl->timings[channel][slotrank].lanes[lane].timA -= mnmx.timA_min_high * 0x40;
+			ctrl->timings[channel][slotrank].lanes[lane].timA -=
+					mnmx.timA_min_high * 0x40;
 		}
 		ctrl->timings[channel][slotrank].io_latency -= mnmx.timA_min_high;
 		printram("4028 -= %d;\n", mnmx.timA_min_high);
@@ -1532,8 +1396,7 @@
 
 		printram("final results:\n");
 		FOR_ALL_LANES
-			printram("Aval: %d, %d, %d: %x\n", channel, slotrank,
-			    lane,
+			printram("Aval: %d, %d, %d: %x\n", channel, slotrank, lane,
 			    ctrl->timings[channel][slotrank].lanes[lane].timA);
 
 		MCHBAR32(GDCRTRAININGMOD) = 0;
@@ -1562,65 +1425,63 @@
 	wait_for_iosav(channel);
 
 	/* DRAM command ACT */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f006;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
-		(MAX((ctrl->tFAW >> 2) + 1, ctrl->tRRD) << 10)
-		| 4 | (ctrl->tRCD << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f006;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) =
+		(MAX((ctrl->tFAW >> 2) + 1, ctrl->tRRD) << 10) | 4 | (ctrl->tRCD << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | (6 << 16);
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x244;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x244;
 
 	/* DRAM command NOP */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f207;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x8041001;
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f207;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x8041001;
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24) | 8;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x3e0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0x3e0;
 
 	/* DRAM command WR */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f201;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x80411f4;
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f201;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) = 0x80411f4;
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = slotrank << 24;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x242;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0x242;
 
 	/* DRAM command NOP */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f207;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
-		0x8000c01 | ((ctrl->CWL + ctrl->tWTR + 5) << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f207;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) =
+		0x08000c01 | ((ctrl->CWL + ctrl->tWTR + 5) << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 8;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0x3e0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0x3e0;
 
-	// execute command queue
+	/* Execute command queue */
 	MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 	wait_for_iosav(channel);
 
 	/* DRAM command PREA */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f002;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | (ctrl->tRP << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f002;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0xc01 | (ctrl->tRP << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60400;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x240;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x240;
 
 	/* DRAM command ACT */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f006;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) =
-		(MAX(ctrl->tRRD, (ctrl->tFAW >> 2) + 1) << 10)
-		| 8 | (ctrl->CAS << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f006;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) =
+		(MAX(ctrl->tRRD, (ctrl->tFAW >> 2) + 1) << 10) | 8 | (ctrl->CAS << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24) | 0x60000;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x244;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0x244;
 
 	/* DRAM command RD */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f105;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) =
 		0x40011f4 | (MAX(ctrl->tRTP, 8) << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24);
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x242;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0x242;
 
 	/* DRAM command PREA */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f002;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0xc01 | (ctrl->tRP << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f002;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) = 0xc01 | (ctrl->tRP << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x60400;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0x240;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0x240;
 
-	// execute command queue
+	/* Execute command queue */
 	MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 	wait_for_iosav(channel);
@@ -1634,30 +1495,32 @@
 	for (i = 1; i < count; i++) {
 		if (min > data[i])
 			min = data[i];
+
 		if (max < data[i])
 			max = data[i];
 	}
-	int threshold = min/2 + max/2;
+	int threshold = min / 2 + max / 2;
 	for (i = 0; i < count; i++)
 		data[i] = data[i] > threshold;
+
 	printram("threshold=%d min=%d max=%d\n", threshold, min, max);
 }
 
 static int discover_timC(ramctr_timing *ctrl, int channel, int slotrank)
 {
 	int timC;
-	int statistics[NUM_LANES][MAX_TIMC + 1];
+	int stats[NUM_LANES][MAX_TIMC + 1];
 	int lane;
 
 	wait_for_iosav(channel);
 
 	/* DRAM command PREA */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f002;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | (ctrl->tRP << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f002;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0xc01 | (ctrl->tRP << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60400;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x240;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x240;
 
-	// execute command queue
+	/* Execute command queue */
 	MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
 
 	for (timC = 0; timC <= MAX_TIMC; timC++) {
@@ -1667,24 +1530,22 @@
 		test_timC(ctrl, channel, slotrank);
 
 		FOR_ALL_LANES {
-			statistics[lane][timC] =
-				MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane));
+			stats[lane][timC] = MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane));
 		}
 	}
 	FOR_ALL_LANES {
-		struct run rn = get_longest_zero_run(
-			statistics[lane], ARRAY_SIZE(statistics[lane]));
+		struct run rn = get_longest_zero_run(stats[lane], ARRAY_SIZE(stats[lane]));
+
 		if (rn.all || rn.length < 8) {
 			printk(BIOS_EMERG, "timC discovery failed: %d, %d, %d\n",
 			       channel, slotrank, lane);
-			/* With command training not happend yet, the lane can
-			 * be erroneous. Take the avarage as reference and try
-			 * again to find a run.
+			/*
+			 * Since command training has not been done yet, the lane can be erroneous.
+			 * Take the average as reference and try again to find a run.
 			 */
-			timC_threshold_process(statistics[lane],
-					       ARRAY_SIZE(statistics[lane]));
-			rn = get_longest_zero_run(statistics[lane],
-						 ARRAY_SIZE(statistics[lane]));
+			timC_threshold_process(stats[lane], ARRAY_SIZE(stats[lane]));
+			rn = get_longest_zero_run(stats[lane], ARRAY_SIZE(stats[lane]));
+
 			if (rn.all || rn.length < 8) {
 				printk(BIOS_EMERG, "timC recovery failed\n");
 				return MAKE_ERR;
@@ -1700,8 +1561,10 @@
 static int get_precedening_channels(ramctr_timing *ctrl, int target_channel)
 {
 	int channel, ret = 0;
+
 	FOR_ALL_POPULATED_CHANNELS if (channel < target_channel)
 		 ret++;
+
 	return ret;
 }
 
@@ -1709,8 +1572,10 @@
 {
 	unsigned int j;
 	unsigned int channel_offset = get_precedening_channels(ctrl, channel) * 0x40;
+
 	for (j = 0; j < 16; j++)
 		write32((void *)(0x04000000 + channel_offset + 4 * j), j & 2 ? b : a);
+
 	sfence();
 }
 
@@ -1727,10 +1592,13 @@
 	unsigned int j;
 	unsigned int channel_offset = get_precedening_channels(ctrl, channel) * 0x40;
 	unsigned int channel_step = 0x40 * num_of_channels(ctrl);
+
 	for (j = 0; j < 16; j++)
 		write32((void *)(0x04000000 + channel_offset + j * 4), 0xffffffff);
+
 	for (j = 0; j < 16; j++)
 		write32((void *)(0x04000000 + channel_offset + channel_step + j * 4), 0);
+
 	sfence();
 }
 
@@ -1750,40 +1618,40 @@
 			wait_for_iosav(channel);
 
 			/* DRAM command MRS
-			 * write MR3 MPR enable
-			 * in this mode only RD and RDA are allowed
-			 * all reads return a predefined pattern */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
+			   write MR3 MPR enable
+			   in this mode only RD and RDA are allowed
+			   all reads return a predefined pattern */
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f000;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) =
 				0xc01 | (ctrl->tMOD << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
 				(slotrank << 24) | 0x360004;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
 			/* DRAM command RD */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4041003;
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f105;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x4041003;
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = slotrank << 24;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0;
 
 			/* DRAM command RD */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f105;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) =
 				0x1001 | ((ctrl->CAS + 8) << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
 				(slotrank << 24) | 0x60000;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0;
 
 			/* DRAM command MRS
 			 * write MR3 MPR disable */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f000;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) =
 				0xc01 | (ctrl->tMOD << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
 				(slotrank << 24) | 0x360000;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0;
 
-			// execute command queue
+			/* Execute command queue */
 			MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 			wait_for_iosav(channel);
@@ -1802,37 +1670,37 @@
 			 * write MR3 MPR enable
 			 * in this mode only RD and RDA are allowed
 			 * all reads return a predefined pattern */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f000;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) =
 				0xc01 | (ctrl->tMOD << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
 				(slotrank << 24) | 0x360004;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
 			/* DRAM command RD */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4041003;
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f105;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x4041003;
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = slotrank << 24;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0;
 
 			/* DRAM command RD */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f105;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) =
 				0x1001 | ((ctrl->CAS + 8) << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
 				(slotrank << 24) | 0x60000;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0;
 
 			/* DRAM command MRS
 			 * write MR3 MPR disable */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f000;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) =
 				0xc01 | (ctrl->tMOD << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
 				(slotrank << 24) | 0x360000;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0;
 
-			// execute command queue
+			/* Execute command queue */
 			MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 			wait_for_iosav(channel);
@@ -1847,19 +1715,19 @@
 
 	wait_for_iosav(channel);
 	/* DRAM command NOP */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f207;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f207;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) =
 		0x8000c01 | ((ctrl->CWL + ctrl->tWLO) << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = 8 | (slotrank << 24);
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
 	/* DRAM command NOP */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f107;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4000c01 | ((ctrl->CAS + 38) << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f107;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x4000c01 | ((ctrl->CAS + 38) << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24) | 4;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0;
 
-	// execute command queue
+	/* Execute command queue */
 	MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(2);
 
 	wait_for_iosav(channel);
@@ -1892,23 +1760,25 @@
 	}
 	FOR_ALL_LANES {
 		struct run rn = get_longest_zero_run(statistics[lane], 128);
-		/* timC is a direct function of timB's 6 LSBs.
-		 * Some tests increments the value of timB by a small value,
-		 * which might cause the 6bit value to overflow, if it's close
-		 * to 0x3F. Increment the value by a small offset if it's likely
-		 * to overflow, to make sure it won't overflow while running
-		 * tests and bricks the system due to a non matching timC.
+		/*
+		 * timC is a direct function of timB's 6 LSBs. Some tests increment the value
+		 * of timB by a small amount, which might cause the 6-bit value to overflow if
+		 * it's close to 0x3f. Increment the value by a small offset if it's likely
+		 * to overflow, to make sure it won't overflow while running tests and brick
+		 * the system due to a non-matching timC.
 		 *
-		 * TODO: find out why some tests (edge write discovery)
-		 *       increment timB. */
-		if ((rn.start & 0x3F) == 0x3E)
+		 * TODO: find out why some tests (edge write discovery) increment timB.
+		 */
+		if ((rn.start & 0x3f) == 0x3e)
 			rn.start += 2;
-		else if ((rn.start & 0x3F) == 0x3F)
+		else if ((rn.start & 0x3f) == 0x3f)
 			rn.start += 1;
+
 		ctrl->timings[channel][slotrank].lanes[lane].timB = rn.start;
 		if (rn.all) {
 			printk(BIOS_EMERG, "timB discovery failed: %d, %d, %d\n",
 			       channel, slotrank, lane);
+
 			return MAKE_ERR;
 		}
 		printram("timB: %d, %d, %d: 0x%02x-0x%02x-0x%02x\n",
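A short numeric illustration of the guard above (not part of the patch): a timB whose low six bits are 0x3e or 0x3f is bumped to the next multiple of 0x40, so a later +1 applied by other tests cannot wrap the 6-bit field back to 0x00.

	int start = 0x3e;
	if ((start & 0x3f) == 0x3e)
		start += 2;
	else if ((start & 0x3f) == 0x3f)
		start += 1;
	/* start == 0x40, leaving headroom in the low 6 bits for further increments */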
@@ -1954,56 +1824,56 @@
 		wait_for_iosav(channel);
 
 		/* DRAM command ACT */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f006;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | (ctrl->tRCD << 16);
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f006;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0xc01 | (ctrl->tRCD << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
 		/* DRAM command NOP */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f207;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x8040c01;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f207;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x8040c01;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24) | 0x8;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x3e0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0x3e0;
 
 		/* DRAM command WR */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f201;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x8041003;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f201;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) = 0x8041003;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24);
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x3e2;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0x3e2;
 
 		/* DRAM command NOP */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f207;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f207;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) =
 			0x8000c01 | ((ctrl->CWL + ctrl->tWTR + 5) << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x8;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0x3e0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0x3e0;
 
-		// execute command queue
+		/* Execute command queue */
 		MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 		wait_for_iosav(channel);
 
 		/* DRAM command PREA */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f002;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | ((ctrl->tRP) << 16);
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f002;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0xc01 | ((ctrl->tRP) << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60400;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x240;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x240;
 
 		/* DRAM command ACT */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f006;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0xc01 | ((ctrl->tRCD) << 16);
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f006;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0xc01 | ((ctrl->tRCD) << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24) | 0x60000;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0;
 
 		/* DRAM command RD */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x3f105;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x4000c01 | ((ctrl->tRP +
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x3f105;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) = 0x4000c01 | ((ctrl->tRP +
 			  ctrl->timings[channel][slotrank].roundtrip_latency +
 			  ctrl->timings[channel][slotrank].io_latency) << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24) | 0x60008;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0;
 
-		// execute command queue
+		/* Execute command queue */
 		MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(3);
 
 		wait_for_iosav(channel);
@@ -2033,28 +1903,28 @@
 	slotrank = !(ctrl->rankmap[channel] & 1) ? 2 : 0;
 
 	/* DRAM command ACT */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x41001;
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x0f003;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0x41001;
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x3e0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x3e0;
 
-	// execute command queue
+	/* Execute command queue */
 	MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
 
 	wait_for_iosav(channel);
 }
 
-/* Compensate the skew between CMD/ADDR/CLK and DQ/DQS lanes.
- * DDR3 adopted the fly-by topology. The data and strobes signals reach
- * the chips at different times with respect to command, address and
- * clock signals.
- * By delaying either all DQ/DQs or all CMD/ADDR/CLK signals, a full phase
- * shift can be introduced.
- * It is assumed that the CLK/ADDR/CMD signals have the same routing delay.
+/*
+ * Compensate the skew between CMD/ADDR/CLK and DQ/DQS lanes.
  *
- * To find the required phase shift the DRAM is placed in "write leveling" mode.
- * In this mode the DRAM-chip samples the CLK on every DQS edge and feeds back the
- * sampled value on the data lanes (DQs).
+ * Since DDR3 uses a fly-by topology, the data and strobe signals reach the chips at different
+ * times with respect to command, address and clock signals. By delaying either all DQ/DQS or
+ * all CMD/ADDR/CLK signals, a full phase shift can be introduced. It is assumed that the
+ * CLK/ADDR/CMD signals have the same routing delay.
+ *
+ * To find the required phase shift the DRAM is placed in "write leveling" mode. In this mode,
+ * the DRAM-chip samples the CLK on every DQS edge and feeds back the sampled value on the data
+ * lanes (DQ).
  */
 int write_training(ramctr_timing *ctrl)
 {
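Conceptually, the procedure described above reduces to the sweep sketched below. The helpers apply_dqs_delay() and sampled_clk_is_high() are hypothetical and only stand in for the real mechanism: discover_timB() does the per-lane sweep through the IOSAV engine and reads the outcome from the training result registers instead.

	static int find_write_leveling_delay(int channel, int slotrank, int lane)
	{
		int delay;

		for (delay = 0; delay < 128; delay++) {
			apply_dqs_delay(channel, slotrank, lane, delay);	/* hypothetical */
			if (sampled_clk_is_high(channel, lane))			/* hypothetical */
				return delay;	/* DQS edge now lines up with CLK high */
		}

		return -1;
	}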
@@ -2069,42 +1939,40 @@
 		MCHBAR32_OR(SCHED_CBIT_ch(channel), 0x200000);
 	}
 
-	/* refresh disable */
+	/* Refresh disable */
 	MCHBAR32_AND(MC_INIT_STATE_G, ~8);
 	FOR_ALL_POPULATED_CHANNELS {
 		write_op(ctrl, channel);
 	}
 
-	/* enable write leveling on all ranks
-	 * disable all DQ outputs
-	 * only NOP is allowed in this mode */
-	FOR_ALL_CHANNELS
-		FOR_ALL_POPULATED_RANKS
-			write_mrreg(ctrl, channel, slotrank, 1,
+	/* Enable write leveling on all ranks
+	   Disable all DQ outputs
+	   Only NOP is allowed in this mode */
+	FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS
+		write_mrreg(ctrl, channel, slotrank, 1,
 				make_mr1(ctrl, slotrank, channel) | 0x1080);
 
 	MCHBAR32(GDCRTRAININGMOD) = 0x108052;
 
 	toggle_io_reset();
 
-	/* set any valid value for timB, it gets corrected later */
+	/* Set any valid value for timB, it gets corrected later */
 	FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS {
 		err = discover_timB(ctrl, channel, slotrank);
 		if (err)
 			return err;
 	}
 
-	/* disable write leveling on all ranks */
+	/* Disable write leveling on all ranks */
 	FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS
-		write_mrreg(ctrl, channel,
-			slotrank, 1, make_mr1(ctrl, slotrank, channel));
+		write_mrreg(ctrl, channel, slotrank, 1, make_mr1(ctrl, slotrank, channel));
 
 	MCHBAR32(GDCRTRAININGMOD) = 0;
 
 	FOR_ALL_POPULATED_CHANNELS
 		wait_for_iosav(channel);
 
-	/* refresh enable */
+	/* Refresh enable */
 	MCHBAR32_OR(MC_INIT_STATE_G, 8);
 
 	FOR_ALL_POPULATED_CHANNELS {
@@ -2113,12 +1981,12 @@
 		wait_for_iosav(channel);
 
 		/* DRAM command ZQCS */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x659001;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x0f003;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0x659001;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = 0x60000;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x3e0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x3e0;
 
-		// execute command queue
+		/* Execute command queue */
 		MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
 
 		wait_for_iosav(channel);
@@ -2182,37 +2050,37 @@
 
 		wait_for_iosav(channel);
 		/* DRAM command ACT */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f006;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f006;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) =
 			((MAX(ctrl->tRRD, (ctrl->tFAW >> 2) + 1)) << 10)
 			| 8 | (ctrl->tRCD << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
 			(slotrank << 24) | ctr | 0x60000;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x244;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x244;
 
 		/* DRAM command WR */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f201;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) =
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f201;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) =
 			0x8001020 | ((ctrl->CWL + ctrl->tWTR + 8) << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24);
 		MCHBAR32(IOSAV_n_ADDRESS_LFSR_ch(channel, 1)) = 0x389abcd;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x20e42;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0x20e42;
 
 		/* DRAM command RD */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f105;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) =
 			0x4001020 | (MAX(ctrl->tRTP, 8) << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24);
 		MCHBAR32(IOSAV_n_ADDRESS_LFSR_ch(channel, 2)) = 0x389abcd;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x20e42;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0x20e42;
 
 		/* DRAM command PRE */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f002;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0xf1001;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f002;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) = 0xf1001;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x60400;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0x240;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0x240;
 
-		// execute command queue
+		/* Execute command queue */
 		MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 		wait_for_iosav(channel);
@@ -2237,8 +2105,8 @@
 static void fill_pattern5(ramctr_timing *ctrl, int channel, int patno)
 {
 	unsigned int i, j;
-	unsigned int channel_offset = get_precedening_channels(ctrl, channel) * 0x40;
-	unsigned int channel_step = 0x40 * num_of_channels(ctrl);
+	unsigned int offset = get_precedening_channels(ctrl, channel) * 0x40;
+	unsigned int step = 0x40 * num_of_channels(ctrl);
 
 	if (patno) {
 		u8 base8 = 0x80 >> ((patno - 1) % 8);
@@ -2246,18 +2114,19 @@
 		for (i = 0; i < 32; i++) {
 			for (j = 0; j < 16; j++) {
 				u32 val = use_base[patno - 1][i] & (1 << (j / 2)) ? base : 0;
+
 				if (invert[patno - 1][i] & (1 << (j / 2)))
 					val = ~val;
-				write32((void *)(0x04000000 + channel_offset + i * channel_step +
-						 j * 4), val);
+
+				write32((void *)((1 << 26) + offset + i * step + j * 4), val);
 			}
 		}
-
 	} else {
-		for (i = 0; i < sizeof(pattern) / sizeof(pattern[0]); i++) {
-			for (j = 0; j < 16; j++)
-				write32((void *)(0x04000000 + channel_offset + i * channel_step +
-						 j * 4), pattern[i][j]);
+		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
+			for (j = 0; j < 16; j++) {
+				const u32 val = pattern[i][j];
+				write32((void *)((1 << 26) + offset + i * step + j * 4), val);
+			}
 		}
 		sfence();
 	}
@@ -2270,16 +2139,16 @@
 	FOR_ALL_POPULATED_CHANNELS {
 		wait_for_iosav(channel);
 
-		/* choose an existing rank.  */
+		/* Choose an existing rank */
 		slotrank = !(ctrl->rankmap[channel] & 1) ? 2 : 0;
 
 		/* DRAM command ZQCS */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x41001;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x0f003;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0x41001;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x3e0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x3e0;
 
-		// execute command queue
+		/* Execute command queue */
 		MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
 
 		wait_for_iosav(channel);
@@ -2295,20 +2164,21 @@
 		slotrank = !(ctrl->rankmap[channel] & 1) ? 2 : 0;
 
 		/* DRAM command ZQCS */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x41001;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x0f003;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0x41001;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x3e0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x3e0;
 
-		// execute command queue
+		/* Execute command queue */
 		MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
 
 		wait_for_iosav(channel);
 	}
 
-	/* jedec reset */
+	/* JEDEC reset */
 	dram_jedecreset(ctrl);
-	/* mrs commands. */
+
+	/* MRS commands */
 	dram_mrscommands(ctrl);
 
 	toggle_io_reset();
@@ -2333,12 +2203,12 @@
 	ctrl->cmd_stretch[channel] = cmd_stretch;
 
 	MCHBAR32(TC_RAP_ch(channel)) =
-		ctrl->tRRD
-		| (ctrl->tRTP << 4)
-		| (ctrl->tCKE << 8)
+		  (ctrl->tRRD <<  0)
+		| (ctrl->tRTP <<  4)
+		| (ctrl->tCKE <<  8)
 		| (ctrl->tWTR << 12)
 		| (ctrl->tFAW << 16)
-		| (ctrl->tWR << 24)
+		| (ctrl->tWR  << 24)
 		| (ctrl->cmd_stretch[channel] << 30);
 
 	if (ctrl->cmd_stretch[channel] == 2)
@@ -2361,11 +2231,12 @@
 		}
 	}
 	FOR_ALL_POPULATED_RANKS {
-		struct run rn =
-			get_longest_zero_run(stat[slotrank], 255);
+		struct run rn = get_longest_zero_run(stat[slotrank], 255);
+
 		ctrl->timings[channel][slotrank].pi_coding = rn.middle - 127;
 		printram("cmd_stretch: %d, %d: 0x%02x-0x%02x-0x%02x\n",
 				 channel, slotrank, rn.start, rn.middle, rn.end);
+
 		if (rn.all || rn.length < MIN_C320C_LEN) {
 			FOR_ALL_POPULATED_RANKS {
 				ctrl->timings[channel][slotrank] =
@@ -2378,9 +2249,10 @@
 	return 0;
 }
 
-/* Adjust CMD phase shift and try multiple command rates.
- * A command rate of 2T doubles the time needed for address and
- * command decode. */
+/*
+ * Adjust CMD phase shift and try multiple command rates.
+ * A command rate of 2T doubles the time needed for address and command decode.
+ */
 int command_training(ramctr_timing *ctrl)
 {
 	int channel;
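The command rate selected here is carried in the cmd_stretch field, bits [31:30] of TC_RAP, as programmed a few hunks above; a minimal sketch of just that bit placement (hypothetical helper, not part of the patch):

	static u32 tc_rap_with_cmd_stretch(u32 tc_rap, u32 cmd_stretch)
	{
		return (tc_rap & ~(3u << 30)) | ((cmd_stretch & 3) << 30);
	}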
@@ -2395,12 +2267,12 @@
 
 		/*
 		 * Dual DIMM per channel:
-		 * Issue:      While c320c discovery seems to succeed raminit
-		 *             will fail in write training.
-		 * Workaround: Skip 1T in dual DIMM mode, that's only
-		 *             supported by a few DIMMs.
-		 * Only try 1T mode for XMP DIMMs that request it in dual DIMM
-		 * mode.
+		 * Issue:
+		 * While c320c discovery seems to succeed, raminit will fail in write training.
+		 *
+		 * Workaround:
+		 * Skip 1T in dual DIMM mode, as it is only supported by a few DIMMs.
+		 * Only try 1T mode for XMP DIMMs that request it in dual DIMM mode.
 		 *
 		 * Single DIMM per channel:
 		 * Try command rate 1T and 2T
@@ -2432,16 +2304,15 @@
 	return 0;
 }
 
-
 static int discover_edges_real(ramctr_timing *ctrl, int channel, int slotrank, int *edges)
 {
 	int edge;
-	int statistics[NUM_LANES][MAX_EDGE_TIMING + 1];
+	int stats[NUM_LANES][MAX_EDGE_TIMING + 1];
 	int lane;
 
 	for (edge = 0; edge <= MAX_EDGE_TIMING; edge++) {
 		FOR_ALL_LANES {
-			ctrl->timings[channel][slotrank].lanes[lane].rising = edge;
+			ctrl->timings[channel][slotrank].lanes[lane].rising  = edge;
 			ctrl->timings[channel][slotrank].lanes[lane].falling = edge;
 		}
 		program_timings(ctrl, channel);
@@ -2452,54 +2323,55 @@
 		}
 
 		wait_for_iosav(channel);
+
 		/* DRAM command MRS
-		 * write MR3 MPR enable
-		 * in this mode only RD and RDA are allowed
-		 * all reads return a predefined pattern */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | (ctrl->tMOD << 16);
+		   write MR3 MPR enable
+		   in this mode only RD and RDA are allowed
+		   all reads return a predefined pattern */
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f000;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0xc01 | (ctrl->tMOD << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x360004;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
 		/* DRAM command RD */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x40411f4;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f105;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x40411f4;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = slotrank << 24;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0;
 
 		/* DRAM command RD */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x1001 | ((ctrl->CAS + 8) << 16);
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f105;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) = 0x1001 | ((ctrl->CAS + 8) << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24) | 0x60000;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0;
 
 		/* DRAM command MRS
-		 * MR3 disable MPR */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0xc01 | (ctrl->tMOD << 16);
+		   MR3 disable MPR */
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f000;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) = 0xc01 | (ctrl->tMOD << 16);
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x360000;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0;
 
-		// execute command queue
+		/* Execute command queue */
 		MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 		wait_for_iosav(channel);
 
 		FOR_ALL_LANES {
-			statistics[lane][edge] =
-				MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane));
+			stats[lane][edge] = MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane));
 		}
 	}
+
 	FOR_ALL_LANES {
-		struct run rn = get_longest_zero_run(statistics[lane], MAX_EDGE_TIMING + 1);
+		struct run rn = get_longest_zero_run(stats[lane], MAX_EDGE_TIMING + 1);
 		edges[lane] = rn.middle;
+
 		if (rn.all) {
-			printk(BIOS_EMERG, "edge discovery failed: %d, %d, %d\n",
-			       channel, slotrank, lane);
+			printk(BIOS_EMERG, "edge discovery failed: %d, %d, %d\n", channel,
+			       slotrank, lane);
 			return MAKE_ERR;
 		}
-		printram("eval %d, %d, %d: %02x\n", channel, slotrank,
-		       lane, edges[lane]);
+		printram("eval %d, %d, %d: %02x\n", channel, slotrank, lane, edges[lane]);
 	}
 	return 0;
 }
@@ -2537,41 +2409,41 @@
 			wait_for_iosav(channel);
 
 			/* DRAM command MRS
-			 * MR3 enable MPR
-			 * write MR3 MPR enable
-			 * in this mode only RD and RDA are allowed
-			 * all reads return a predefined pattern */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
+			   MR3 enable MPR
+			   write MR3 MPR enable
+			   in this mode only RD and RDA are allowed
+			   all reads return a predefined pattern */
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f000;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) =
 				0xc01 | (ctrl->tMOD << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
 				(slotrank << 24) | 0x360004;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
 			/* DRAM command RD */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4041003;
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f105;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x4041003;
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = slotrank << 24;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0;
 
 			/* DRAM command RD */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f105;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) =
 				0x1001 | ((ctrl->CAS + 8) << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
 				(slotrank << 24) | 0x60000;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0;
 
 			/* DRAM command MRS
 			 * MR3 disable MPR */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f000;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) =
 				0xc01 | (ctrl->tMOD << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
 				(slotrank << 24) | 0x360000;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0;
 
-			// execute command queue
+			/* Execute command queue */
 			MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 			wait_for_iosav(channel);
@@ -2581,7 +2453,7 @@
 
 		FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
 			ctrl->timings[channel][slotrank].lanes[lane].falling = 48;
-			ctrl->timings[channel][slotrank].lanes[lane].rising = 48;
+			ctrl->timings[channel][slotrank].lanes[lane].rising  = 48;
 		}
 
 		program_timings(ctrl, channel);
@@ -2590,42 +2462,42 @@
 			wait_for_iosav(channel);
 
 			/* DRAM command MRS
-			 * MR3 enable MPR
-			 * write MR3 MPR enable
-			 * in this mode only RD and RDA are allowed
-			 * all reads return a predefined pattern */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
+			   MR3 enable MPR
+			   write MR3 MPR enable
+			   in this mode only RD and RDA are allowed
+			   all reads return a predefined pattern */
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f000;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) =
 				0xc01 | (ctrl->tMOD << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
 				(slotrank << 24) | 0x360004;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0;
 
 			/* DRAM command RD */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4041003;
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f105;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x4041003;
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) =
 				(slotrank << 24);
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0;
 
 			/* DRAM command RD */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f105;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) =
 				0x1001 | ((ctrl->CAS + 8) << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
 				(slotrank << 24) | 0x60000;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0;
 
 			/* DRAM command MRS
 			 * MR3 disable MPR */
-			MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
-			MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
+			MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f000;
+			MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) =
 				0xc01 | (ctrl->tMOD << 16);
 			MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
 				(slotrank << 24) | 0x360000;
-			MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
+			MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0;
 
-			// execute command queue
+			/* Execute command queue */
 			MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 			wait_for_iosav(channel);
@@ -2682,12 +2554,11 @@
 	return 0;
 }
 
-static int discover_edges_write_real(ramctr_timing *ctrl, int channel,
-				      int slotrank, int *edges)
+static int discover_edges_write_real(ramctr_timing *ctrl, int channel, int slotrank, int *edges)
 {
 	int edge;
-	u32 raw_statistics[MAX_EDGE_TIMING + 1];
-	int statistics[MAX_EDGE_TIMING + 1];
+	u32 raw_stats[MAX_EDGE_TIMING + 1];
+	int stats[MAX_EDGE_TIMING + 1];
 	const int reg3000b24[] = { 0, 0xc, 0x2c };
 	int lane, i;
 	int lower[NUM_LANES];
@@ -2701,12 +2572,13 @@
 
 	for (i = 0; i < 3; i++) {
 		MCHBAR32(GDCRTRAININGMOD_ch(channel)) = reg3000b24[i] << 24;
-		printram("[%x] = 0x%08x\n",
-		       GDCRTRAININGMOD_ch(channel), reg3000b24[i] << 24);
+		printram("[%x] = 0x%08x\n", GDCRTRAININGMOD_ch(channel), reg3000b24[i] << 24);
+
 		for (pat = 0; pat < NUM_PATTERNS; pat++) {
 			fill_pattern5(ctrl, channel, pat);
 			MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 0x1f;
 			printram("using pattern %d\n", pat);
+
 			for (edge = 0; edge <= MAX_EDGE_TIMING; edge++) {
 				FOR_ALL_LANES {
 					ctrl->timings[channel][slotrank].lanes[lane].
@@ -2723,68 +2595,70 @@
 				wait_for_iosav(channel);
 
 				/* DRAM command ACT */
-				MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f006;
-				MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
+				MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x1f006;
+				MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) =
 					0x4 | (ctrl->tRCD << 16) |
 					(MAX(ctrl->tRRD, (ctrl->tFAW >> 2) + 1) << 10);
 				MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
 					(slotrank << 24) | 0x60000;
-				MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x240;
+				MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x240;
 
 				/* DRAM command WR */
-				MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f201;
-				MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x8005020 |
+				MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f201;
+				MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x8005020 |
 					((ctrl->tWTR + ctrl->CWL + 8) << 16);
 				MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) =
 					slotrank << 24;
-				MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x242;
+				MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0x242;
 
 				/* DRAM command RD */
-				MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
-				MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
+				MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f105;
+				MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) =
 					0x4005020 | (MAX(ctrl->tRTP, 8) << 16);
 				MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
 					slotrank << 24;
-				MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x242;
+				MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0x242;
 
 				/* DRAM command PRE */
-				MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f002;
-				MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
+				MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f002;
+				MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) =
 					0xc01 | (ctrl->tRP << 16);
 				MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
 					(slotrank << 24) | 0x60400;
-				MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
+				MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0;
 
-				// execute command queue
-				MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) =
-					IOSAV_RUN_ONCE(4);
+				/* Execute command queue */
+				MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 				wait_for_iosav(channel);
 				FOR_ALL_LANES {
 					MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane));
 				}
 
-				raw_statistics[edge] = MCHBAR32(0x436c + channel * 0x400);
+				/* FIXME: This register only exists on Ivy Bridge */
+				raw_stats[edge] = MCHBAR32(0x436c + channel * 0x400);
 			}
+
 			FOR_ALL_LANES {
 				struct run rn;
 				for (edge = 0; edge <= MAX_EDGE_TIMING; edge++)
-					statistics[edge] =
-						! !(raw_statistics[edge] & (1 << lane));
-				rn = get_longest_zero_run(statistics,
-							  MAX_EDGE_TIMING + 1);
-				printram("edges: %d, %d, %d: 0x%02x-0x%02x-0x%02x, 0x%02x-0x%02x\n",
-					 channel, slotrank, i, rn.start, rn.middle,
-					 rn.end, rn.start + ctrl->edge_offset[i],
+					stats[edge] = !!(raw_stats[edge] & (1 << lane));
+
+				rn = get_longest_zero_run(stats, MAX_EDGE_TIMING + 1);
+
+				printram("edges: %d, %d, %d: 0x%02x-0x%02x-0x%02x, "
+					 "0x%02x-0x%02x\n", channel, slotrank, i, rn.start,
+					 rn.middle, rn.end, rn.start + ctrl->edge_offset[i],
 					 rn.end - ctrl->edge_offset[i]);
-				lower[lane] =
-					MAX(rn.start + ctrl->edge_offset[i], lower[lane]);
-				upper[lane] =
-					MIN(rn.end - ctrl->edge_offset[i], upper[lane]);
+
+				lower[lane] = MAX(rn.start + ctrl->edge_offset[i], lower[lane]);
+				upper[lane] = MIN(rn.end   - ctrl->edge_offset[i], upper[lane]);
+
 				edges[lane] = (lower[lane] + upper[lane]) / 2;
 				if (rn.all || (lower[lane] > upper[lane])) {
-					printk(BIOS_EMERG, "edge write discovery failed: %d, %d, %d\n",
-					       channel, slotrank, lane);
+					printk(BIOS_EMERG, "edge write discovery failed: "
+						"%d, %d, %d\n", channel, slotrank, lane);
+
 					return MAKE_ERR;
 				}
 			}
@@ -2799,17 +2673,19 @@
 int discover_edges_write(ramctr_timing *ctrl)
 {
 	int falling_edges[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
-	int rising_edges[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
-	int channel, slotrank, lane;
-	int err;
+	int  rising_edges[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
+	int channel, slotrank, lane, err;
 
-	/* FIXME: under some conditions (older chipsets?) vendor BIOS sets both edges to the same value.  */
+	/*
+	 * FIXME: Under some conditions, the vendor BIOS sets both edges to the same value
+	 *        and uses a single loop. This seems to be a debugging configuration.
+	 */
 	MCHBAR32(IOSAV_DC_MASK) = 0x300;
 	printram("discover falling edges write:\n[%x] = %x\n", IOSAV_DC_MASK, 0x300);
 
 	FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS {
 		err = discover_edges_write_real(ctrl, channel, slotrank,
-					  falling_edges[channel][slotrank]);
+					falling_edges[channel][slotrank]);
 		if (err)
 			return err;
 	}
@@ -2819,7 +2695,7 @@
 
 	FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS {
 		err = discover_edges_write_real(ctrl, channel, slotrank,
-					  rising_edges[channel][slotrank]);
+					 rising_edges[channel][slotrank]);
 		if (err)
 			return err;
 	}
@@ -2828,9 +2704,10 @@
 
 	FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
 		ctrl->timings[channel][slotrank].lanes[lane].falling =
-		    falling_edges[channel][slotrank][lane];
+				falling_edges[channel][slotrank][lane];
+
 		ctrl->timings[channel][slotrank].lanes[lane].rising =
-		    rising_edges[channel][slotrank][lane];
+				rising_edges[channel][slotrank][lane];
 	}
 
 	FOR_ALL_POPULATED_CHANNELS
@@ -2845,34 +2722,34 @@
 static void test_timC_write(ramctr_timing *ctrl, int channel, int slotrank)
 {
 	wait_for_iosav(channel);
+
 	/* DRAM command ACT */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f006;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x0001f006;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) =
 		(MAX((ctrl->tFAW >> 2) + 1, ctrl->tRRD) << 10) | (ctrl->tRCD << 16) | 4;
-	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
-		(slotrank << 24) | 0x60000;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x244;
+	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x0244;
 
 	/* DRAM command WR */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f201;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) =
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x1f201;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) =
 		0x80011e0 | ((ctrl->tWTR + ctrl->CWL + 8) << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = slotrank << 24;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x242;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0x242;
 
 	/* DRAM command RD */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x40011e0 | (MAX(ctrl->tRTP, 8) << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x1f105;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) = 0x40011e0 | (MAX(ctrl->tRTP, 8) << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = slotrank << 24;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x242;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0x242;
 
 	/* DRAM command PRE */
-	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f002;
-	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0x1001 | (ctrl->tRP << 16);
+	MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x1f002;
+	MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) = 0x1001 | (ctrl->tRP << 16);
 	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x60400;
-	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
+	MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0;
 
-	// execute command queue
+	/* Execute command queue */
 	MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 	wait_for_iosav(channel);
@@ -2880,7 +2757,7 @@
 
 int discover_timC_write(ramctr_timing *ctrl)
 {
-	const u8 rege3c_b24[3] = { 0, 0xf, 0x2f };
+	const u8 rege3c_b24[3] = { 0, 0x0f, 0x2f };
 	int i, pat;
 
 	int lower[NUM_CHANNELS][NUM_SLOTRANKS][NUM_LANES];
@@ -2901,53 +2778,65 @@
 
 	for (i = 0; i < 3; i++)
 		FOR_ALL_POPULATED_CHANNELS {
-			MCHBAR32_AND_OR(GDCRCMDDEBUGMUXCFG_Cz_S(channel), ~0x3f000000,
-				rege3c_b24[i] << 24);
+
+			/* FIXME: Setting the Write VREF must only be done on Ivy Bridge */
+			MCHBAR32_AND_OR(GDCRCMDDEBUGMUXCFG_Cz_S(channel),
+					~0x3f000000, rege3c_b24[i] << 24);
+
 			udelay(2);
+
 			for (pat = 0; pat < NUM_PATTERNS; pat++) {
 				FOR_ALL_POPULATED_RANKS {
 					int timC;
-					u32 raw_statistics[MAX_TIMC + 1];
-					int statistics[MAX_TIMC + 1];
+					u32 raw_stats[MAX_TIMC + 1];
+					int stats[MAX_TIMC + 1];
 
 					/* Make sure rn.start < rn.end */
-					statistics[MAX_TIMC] = 1;
+					stats[MAX_TIMC] = 1;
 
 					fill_pattern5(ctrl, channel, pat);
-					MCHBAR32(IOSAV_DATA_CTL_ch(channel)) =
-						0x1f;
+					MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 0x1f;
+
 					for (timC = 0; timC < MAX_TIMC; timC++) {
-						FOR_ALL_LANES
-							ctrl->timings[channel][slotrank].lanes[lane].timC = timC;
+						FOR_ALL_LANES {
+							ctrl->timings[channel][slotrank]
+								.lanes[lane].timC = timC;
+						}
 						program_timings(ctrl, channel);
 
 						test_timC_write (ctrl, channel, slotrank);
 
-						raw_statistics[timC] =
+						/* FIXME: Another IVB-only register! */
+						raw_stats[timC] =
 							MCHBAR32(0x436c + channel * 0x400);
 					}
 					FOR_ALL_LANES {
 						struct run rn;
-						for (timC = 0; timC < MAX_TIMC; timC++)
-							statistics[timC] =
-								!!(raw_statistics[timC] &
-								   (1 << lane));
+						for (timC = 0; timC < MAX_TIMC; timC++) {
+							stats[timC] = !!(raw_stats[timC]
+									& (1 << lane));
+						}
 
-						rn = get_longest_zero_run(statistics,
-									  MAX_TIMC + 1);
+						rn = get_longest_zero_run(stats, MAX_TIMC + 1);
 						if (rn.all) {
-							printk(BIOS_EMERG, "timC write discovery failed: %d, %d, %d\n",
-							       channel, slotrank, lane);
+							printk(BIOS_EMERG,
+								"timC write discovery failed: "
+								"%d, %d, %d\n", channel,
+								slotrank, lane);
+
 							return MAKE_ERR;
 						}
-						printram("timC: %d, %d, %d: 0x%02x-0x%02x-0x%02x, 0x%02x-0x%02x\n",
-							 channel, slotrank, i, rn.start,
-							 rn.middle, rn.end,
+						printram("timC: %d, %d, %d: "
+							 "0x%02x-0x%02x-0x%02x, "
+							 "0x%02x-0x%02x\n", channel, slotrank,
+							 i, rn.start, rn.middle, rn.end,
 							 rn.start + ctrl->timC_offset[i],
-							 rn.end - ctrl->timC_offset[i]);
+							 rn.end   - ctrl->timC_offset[i]);
+
 						lower[channel][slotrank][lane] =
 							MAX(rn.start + ctrl->timC_offset[i],
 							    lower[channel][slotrank][lane]);
+
 						upper[channel][slotrank][lane] =
 							MIN(rn.end - ctrl->timC_offset[i],
 							    upper[channel][slotrank][lane]);
@@ -2958,6 +2847,7 @@
 		}
 
 	FOR_ALL_CHANNELS {
+		/* FIXME: Setting the Write VREF must only be done on Ivy Bridge */
 		MCHBAR32_AND(GDCRCMDDEBUGMUXCFG_Cz_S(channel), ~0x3f000000);
 		udelay(2);
 	}
@@ -2971,10 +2861,10 @@
 	printram("CPB\n");
 
 	FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
-		printram("timC %d, %d, %d: %x\n", channel,
-		       slotrank, lane,
+		printram("timC %d, %d, %d: %x\n", channel, slotrank, lane,
 		       (lower[channel][slotrank][lane] +
 			upper[channel][slotrank][lane]) / 2);
+
 		ctrl->timings[channel][slotrank].lanes[lane].timC =
 		    (lower[channel][slotrank][lane] +
 		     upper[channel][slotrank][lane]) / 2;
@@ -3049,30 +2939,30 @@
 		wait_for_iosav(channel);
 
 		/* DRAM command ACT */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0001f006;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x0028a004;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 0)) = 0x0001f006;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 0)) = 0x0028a004;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = 0x00060000 | (slotrank << 24);
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x00000244;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 0)) = 0x00000244;
 
 		/* DRAM command WR */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x0001f201;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x08281064;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 1)) = 0x0001f201;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 1)) = 0x08281064;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = slotrank << 24;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x00000242;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 1)) = 0x00000242;
 
 		/* DRAM command RD */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x0001f105;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x04281064;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 2)) = 0x0001f105;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 2)) = 0x04281064;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = slotrank << 24;
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x00000242;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 2)) = 0x00000242;
 
 		/* DRAM command PRE */
-		MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x0001f002;
-		MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0x00280c01;
+		MCHBAR32(IOSAV_n_SP_CMD_CTRL_ch(channel, 3)) = 0x0001f002;
+		MCHBAR32(IOSAV_n_SUBSEQ_CTRL_ch(channel, 3)) = 0x00280c01;
 		MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = 0x00060400 | (slotrank << 24);
-		MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0x00000240;
+		MCHBAR32(IOSAV_n_ADDR_UPDATE_ch(channel, 3)) = 0x00000240;
 
-		// execute command queue
+		/* Execute command queue */
 		MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
 
 		wait_for_iosav(channel);
@@ -3090,29 +2980,27 @@
 {
 	int channel;
 
-	/* FIXME: we hardcode seeds. Do we need to use some PRNG for them?
-	   I don't think so.  */
+	/* FIXME: we hardcode seeds. Do we need to use some PRNG for them? I don't think so. */
 	static u32 seeds[NUM_CHANNELS][3] = {
 		{0x00009a36, 0xbafcfdcf, 0x46d1ab68},
 		{0x00028bfa, 0x53fe4b49, 0x19ed5483}
 	};
 	FOR_ALL_POPULATED_CHANNELS {
 		MCHBAR32(SCHED_CBIT_ch(channel)) &= ~0x10000000;
-		MCHBAR32(SCRAMBLING_SEED_1_ch(channel))      = seeds[channel][0];
-		MCHBAR32(SCRAMBLING_SEED_2_HIGH_ch(channel)) = seeds[channel][1];
-		MCHBAR32(SCRAMBLING_SEED_2_LOW_ch(channel))  = seeds[channel][2];
+		MCHBAR32(SCRAMBLING_SEED_1_ch(channel))    = seeds[channel][0];
+		MCHBAR32(SCRAMBLING_SEED_2_HI_ch(channel)) = seeds[channel][1];
+		MCHBAR32(SCRAMBLING_SEED_2_LO_ch(channel)) = seeds[channel][2];
 	}
 }
 
-void set_4f8c(void)
+void set_wmm_behavior(void)
 {
-	u32 cpu;
+	u32 cpu = cpu_get_cpuid();
 
-	cpu = cpu_get_cpuid();
 	if (IS_SANDY_CPU(cpu) && (IS_SANDY_CPU_D0(cpu) || IS_SANDY_CPU_D1(cpu))) {
-		MCHBAR32(SC_WDBWM) = 0x141D1519;
+		MCHBAR32(SC_WDBWM) = 0x141d1519;
 	} else {
-		MCHBAR32(SC_WDBWM) = 0x551D1519;
+		MCHBAR32(SC_WDBWM) = 0x551d1519;
 	}
 }
 
@@ -3121,7 +3009,7 @@
 	int channel;
 
 	FOR_ALL_POPULATED_CHANNELS {
-		// Always drive command bus
+		/* Always drive command bus */
 		MCHBAR32_OR(TC_RAP_ch(channel), 0x20000000);
 	}
 
@@ -3132,7 +3020,7 @@
 	}
 }
 
-void set_4008c(ramctr_timing *ctrl)
+void set_read_write_timings(ramctr_timing *ctrl)
 {
 	int channel, slotrank;
 
@@ -3146,20 +3034,13 @@
 			min_pi = MIN(ctrl->timings[channel][slotrank].pi_coding, min_pi);
 		}
 
-		if (max_pi - min_pi > 51)
-			b20 = 0;
-		else
-			b20 = ctrl->ref_card_offset[channel];
+		b20 = (max_pi - min_pi > 51) ? 0 : ctrl->ref_card_offset[channel];
 
-		if (ctrl->pi_coding_threshold < max_pi - min_pi)
-			b4_8_12 = 0x3330;
-		else
-			b4_8_12 = 0x2220;
+		b4_8_12 = (ctrl->pi_coding_threshold < max_pi - min_pi) ? 0x3330 : 0x2220;
 
 		dram_odt_stretch(ctrl, channel);
 
-		MCHBAR32(TC_RWP_ch(channel)) =
-			0x0a000000 | (b20 << 20) |
+		MCHBAR32(TC_RWP_ch(channel)) = 0x0a000000 | (b20 << 20) |
 			((ctrl->ref_card_offset[channel] + 2) << 16) | b4_8_12;
 	}
 }
@@ -3173,12 +3054,13 @@
 	}
 }
 
-static int encode_5d10(int ns)
+/* Encode the watermark latencies in a suitable format for consumption by graphics drivers */
+static int encode_wm(int ns)
 {
 	return (ns + 499) / 500;
 }
 
-/* FIXME: values in this function should be hardware revision-dependent.  */
+/* FIXME: values in this function should be hardware revision-dependent */
 void final_registers(ramctr_timing *ctrl)
 {
 	const size_t is_mobile = get_platform_type() == PLATFORM_MOBILE;
@@ -3188,17 +3070,17 @@
 	int t3_ns;
 	u32 r32;
 
-	/* FIXME: This register only exists on Ivy Bridge. */
-	MCHBAR32(WMM_READ_CONFIG) = 0x00000046;
+	/* FIXME: This register only exists on Ivy Bridge */
+	MCHBAR32(WMM_READ_CONFIG) = 0x46;
 
 	FOR_ALL_CHANNELS
-		MCHBAR32_AND_OR(TC_OTHP_ch(channel), 0xFFFFCFFF, 0x1000);
+		MCHBAR32_AND_OR(TC_OTHP_ch(channel), 0xffffcfff, 0x1000);
 
 	if (is_mobile)
 		/* APD - DLL Off, 64 DCLKs until idle, decision per rank */
 		MCHBAR32(PM_PDWN_CONFIG) = 0x00000740;
 	else
-		/* APD - PPD, 64 DCLKs until idle, decision per rank */
+		/* APD - PPD,     64 DCLKs until idle, decision per rank */
 		MCHBAR32(PM_PDWN_CONFIG) = 0x00000340;
 
 	FOR_ALL_CHANNELS
@@ -3209,75 +3091,76 @@
 
 	FOR_ALL_CHANNELS {
 		switch (ctrl->rankmap[channel]) {
-			/* Unpopulated channel.  */
+			/* Unpopulated channel */
 		case 0:
 			MCHBAR32(PM_CMD_PWR_ch(channel)) = 0;
 			break;
-			/* Only single-ranked dimms.  */
+			/* Only single-ranked dimms */
 		case 1:
 		case 4:
 		case 5:
-			MCHBAR32(PM_CMD_PWR_ch(channel)) = 0x373131;
+			MCHBAR32(PM_CMD_PWR_ch(channel)) = 0x00373131;
 			break;
-			/* Dual-ranked dimms present.  */
+			/* Dual-ranked dimms present */
 		default:
-			MCHBAR32(PM_CMD_PWR_ch(channel)) = 0x9b6ea1;
+			MCHBAR32(PM_CMD_PWR_ch(channel)) = 0x009b6ea1;
 			break;
 		}
 	}
 
 	MCHBAR32(MEM_TRML_ESTIMATION_CONFIG) = 0xca9171e5;
-	MCHBAR32_AND_OR(MEM_TRML_THRESHOLDS_CONFIG, ~0xffffff, 0xe4d5d0);
+	MCHBAR32_AND_OR(MEM_TRML_THRESHOLDS_CONFIG, ~0x00ffffff, 0x00e4d5d0);
 	MCHBAR32_AND(MEM_TRML_INTERRUPT, ~0x1f);
 
 	FOR_ALL_CHANNELS
-		MCHBAR32_AND_OR(TC_RFP_ch(channel), ~0x30000, 1 << 16);
+		MCHBAR32_AND_OR(TC_RFP_ch(channel), ~(3 << 16), 1 << 16);
 
 	MCHBAR32_OR(MC_INIT_STATE_G, 1);
 	MCHBAR32_OR(MC_INIT_STATE_G, 0x80);
 	MCHBAR32(BANDTIMERS_SNB) = 0xfa;
 
-	/* Find a populated channel.  */
+	/* Find a populated channel */
 	FOR_ALL_POPULATED_CHANNELS
 		break;
 
 	t1_cycles = (MCHBAR32(TC_ZQCAL_ch(channel)) >> 8) & 0xff;
 	r32 = MCHBAR32(PM_DLL_CONFIG);
-	if (r32 & 0x20000)
+	if (r32 & (1 << 17))
 		t1_cycles += (r32 & 0xfff);
 	t1_cycles += MCHBAR32(TC_SRFTP_ch(channel)) & 0xfff;
 	t1_ns = t1_cycles * ctrl->tCK / 256 + 544;
-	if (!(r32 & 0x20000))
+	if (!(r32 & (1 << 17)))
 		t1_ns += 500;
 
 	t2_ns = 10 * ((MCHBAR32(SAPMTIMERS) >> 8) & 0xfff);
 	if (MCHBAR32(SAPMCTL) & 8) {
-		t3_ns = 10 * ((MCHBAR32(BANDTIMERS_IVB) >> 8) & 0xfff);
+		t3_ns  = 10 * ((MCHBAR32(BANDTIMERS_IVB) >> 8) & 0xfff);
 		t3_ns += 10 * (MCHBAR32(SAPMTIMERS2_IVB) & 0xff);
 	} else {
 		t3_ns = 500;
 	}
-	printk(BIOS_DEBUG, "t123: %d, %d, %d\n",
-	       t1_ns, t2_ns, t3_ns);
-	MCHBAR32_AND_OR(0x5d10, 0xC0C0C0C0,
-		((encode_5d10(t1_ns) + encode_5d10(t2_ns)) << 16) |
-		(encode_5d10(t1_ns) << 8) | ((encode_5d10(t3_ns) +
-		encode_5d10(t2_ns) + encode_5d10(t1_ns)) << 24) | 0xc);
+
+	/* The graphics driver will use these watermark values */
+	printk(BIOS_DEBUG, "t123: %d, %d, %d\n", t1_ns, t2_ns, t3_ns);
+	MCHBAR32_AND_OR(SSKPD, 0xc0c0c0c0,
+		((encode_wm(t1_ns) + encode_wm(t2_ns)) << 16) | (encode_wm(t1_ns) << 8) |
+		((encode_wm(t3_ns) + encode_wm(t2_ns) + encode_wm(t1_ns)) << 24) | 0x0c);
 }
 
 void restore_timings(ramctr_timing *ctrl)
 {
 	int channel, slotrank, lane;
 
-	FOR_ALL_POPULATED_CHANNELS
-	    MCHBAR32(TC_RAP_ch(channel)) =
-		ctrl->tRRD
-		| (ctrl->tRTP << 4)
-		| (ctrl->tCKE << 8)
-		| (ctrl->tWTR << 12)
-		| (ctrl->tFAW << 16)
-		| (ctrl->tWR << 24)
-		| (ctrl->cmd_stretch[channel] << 30);
+	FOR_ALL_POPULATED_CHANNELS {
+		MCHBAR32(TC_RAP_ch(channel)) =
+			  (ctrl->tRRD <<  0)
+			| (ctrl->tRTP <<  4)
+			| (ctrl->tCKE <<  8)
+			| (ctrl->tWTR << 12)
+			| (ctrl->tFAW << 16)
+			| (ctrl->tWR  << 24)
+			| (ctrl->cmd_stretch[channel] << 30);
+	}
 
 	udelay(1);
 
@@ -3290,11 +3173,11 @@
 	}
 
 	FOR_ALL_POPULATED_CHANNELS
-		MCHBAR32_OR(TC_RWP_ch(channel), 0x8000000);
+		MCHBAR32_OR(TC_RWP_ch(channel), 0x08000000);
 
 	FOR_ALL_POPULATED_CHANNELS {
-		udelay (1);
-		MCHBAR32_OR(SCHED_CBIT_ch(channel), 0x200000);
+		udelay(1);
+		MCHBAR32_OR(SCHED_CBIT_ch(channel), 0x00200000);
 	}
 
 	printram("CPE\n");
@@ -3310,36 +3193,39 @@
 
 	u32 reg, addr;
 
-	while (!(MCHBAR32(RCOMP_TIMER) & 0x10000));
+	/* Poll for RCOMP */
+	while (!(MCHBAR32(RCOMP_TIMER) & (1 << 16)))
+		;
+
 	do {
 		reg = MCHBAR32(IOSAV_STATUS_ch(0));
 	} while ((reg & 0x14) == 0);
 
-	// Set state of memory controller
+	/* Set state of memory controller */
 	MCHBAR32(MC_INIT_STATE_G) = 0x116;
-	MCHBAR32(MC_INIT_STATE) = 0;
+	MCHBAR32(MC_INIT_STATE)   = 0;
 
-	// Wait 500us
+	/* Wait 500us */
 	udelay(500);
 
 	FOR_ALL_CHANNELS {
-		// Set valid rank CKE
+		/* Set valid rank CKE */
 		reg = 0;
-		reg = (reg & ~0xf) | ctrl->rankmap[channel];
+		reg = (reg & ~0x0f) | ctrl->rankmap[channel];
 		addr = MC_INIT_STATE_ch(channel);
 		MCHBAR32(addr) = reg;
 
-		// Wait 10ns for ranks to settle
-		//udelay(0.01);
+		/* Wait 10ns for ranks to settle */
+		// udelay(0.01);
 
 		reg = (reg & ~0xf0) | (ctrl->rankmap[channel] << 4);
 		MCHBAR32(addr) = reg;
 
-		// Write reset using a NOP
+		/* Write reset using a NOP */
 		write_reset(ctrl);
 	}
 
-	/* mrs commands. */
+	/* MRS commands */
 	dram_mrscommands(ctrl);
 
 	printram("CP5c\n");
diff --git a/src/northbridge/intel/sandybridge/raminit_common.h b/src/northbridge/intel/sandybridge/raminit_common.h
index b1abf5e..0735ceaa 100644
--- a/src/northbridge/intel/sandybridge/raminit_common.h
+++ b/src/northbridge/intel/sandybridge/raminit_common.h
@@ -17,29 +17,29 @@
 
 #include <stdint.h>
 
-#define BASEFREQ 133
-#define tDLLK 512
+#define BASEFREQ	133
+#define tDLLK		512
 
-#define IS_SANDY_CPU(x) ((x & 0xffff0) == 0x206a0)
-#define IS_SANDY_CPU_C(x) ((x & 0xf) == 4)
+#define IS_SANDY_CPU(x)    ((x & 0xffff0) == 0x206a0)
+#define IS_SANDY_CPU_C(x)  ((x & 0xf) == 4)
 #define IS_SANDY_CPU_D0(x) ((x & 0xf) == 5)
 #define IS_SANDY_CPU_D1(x) ((x & 0xf) == 6)
 #define IS_SANDY_CPU_D2(x) ((x & 0xf) == 7)
 
-#define IS_IVY_CPU(x) ((x & 0xffff0) == 0x306a0)
+#define IS_IVY_CPU(x)   ((x & 0xffff0) == 0x306a0)
 #define IS_IVY_CPU_C(x) ((x & 0xf) == 4)
 #define IS_IVY_CPU_K(x) ((x & 0xf) == 5)
 #define IS_IVY_CPU_D(x) ((x & 0xf) == 6)
 #define IS_IVY_CPU_E(x) ((x & 0xf) >= 8)
 
-#define NUM_CHANNELS 2
-#define NUM_SLOTRANKS 4
-#define NUM_SLOTS 2
-#define NUM_LANES 8
+#define NUM_CHANNELS	2
+#define NUM_SLOTRANKS	4
+#define NUM_SLOTS	2
+#define NUM_LANES	8
 
 /* FIXME: Vendor BIOS uses 64 but our algorithms are less
    performant and even 1 seems to be enough in practice.  */
-#define NUM_PATTERNS 4
+#define NUM_PATTERNS	4
 
 typedef struct odtmap_st {
 	u16 rttwr;
@@ -51,24 +51,24 @@
 } dimm_info;
 
 struct ram_rank_timings {
-	/* ROUNDT_LAT register. One byte per slotrank. */
+	/* ROUNDT_LAT register: One byte per slotrank */
 	u8 roundtrip_latency;
 
-	/* IO_LATENCY register. One nibble per slotrank. */
+	/* IO_LATENCY register: One nibble per slotrank */
 	u8 io_latency;
 
-	/* Phase interpolator coding for command and control. */
+	/* Phase interpolator coding for command and control */
 	int pi_coding;
 
 	struct ram_lane_timings {
-		/* lane register offset 0x10.  */
-		u16 timA;	/* bits 0 - 5, bits 16 - 18 */
-		u8 rising;	/* bits 8 - 14 */
-		u8 falling;	/* bits 20 - 26.  */
+		/* Lane register offset 0x10 */
+		u16 timA;	/* bits  0 -  5, bits 16 - 18 */
+		u8 rising;	/* bits  8 - 14 */
+		u8 falling;	/* bits 20 - 26 */
 
-		/* lane register offset 0x20.  */
-		int timC;	/* bit 0 - 5, 19.  */
-		u16 timB;	/* bits 8 - 13, 15 - 17.  */
+		/* Lane register offset 0x20 */
+		int timC;	/* bits 0 -  5, 19 */
+		u16 timB;	/* bits 8 - 13, 15 - 17 */
 	} lanes[NUM_LANES];
 };
 
@@ -82,7 +82,7 @@
 	u8 base_freq;
 
 	u16 cas_supported;
-	/* tLatencies are in units of ns, scaled by x256 */
+	/* Latencies are in units of ns, scaled by x256 */
 	u32 tCK;
 	u32 tAA;
 	u32 tWR;
@@ -97,8 +97,8 @@
 	u32 tCWL;
 	u32 tCMD;
 	/* Latencies in terms of clock cycles
-	 * They are saved separately as they are needed for DRAM MRS commands */
-	u8 CAS;			/* CAS read latency */
+	   They are saved separately as they are needed for DRAM MRS commands */
+	u8 CAS;			/* CAS read  latency */
 	u8 CWL;			/* CAS write latency */
 
 	u32 tREFI;
@@ -110,7 +110,7 @@
 	u32 tXP;
 	u32 tAONPD;
 
-	/* Bits [0..11] of PM_DLL_CONFIG: Master DLL wakeup delay timer. */
+	/* Bits [0..11] of PM_DLL_CONFIG: Master DLL wakeup delay timer */
 	u16 mdll_wake_delay;
 
 	u8 rankmap[NUM_CHANNELS];
@@ -135,7 +135,6 @@
 	dimm_info info;
 } ramctr_timing;
 
-#define HOST_BRIDGE	PCI_DEV(0, 0, 0)
 #define SOUTHBRIDGE	PCI_DEV(0, 0x1f, 0)
 
 #define FOR_ALL_LANES for (lane = 0; lane < NUM_LANES; lane++)
@@ -149,8 +148,8 @@
 #define MAX_CAS 18
 #define MIN_CAS 4
 
-#define MAKE_ERR ((channel<<16)|(slotrank<<8)|1)
-#define GET_ERR_CHANNEL(x) (x>>16)
+#define MAKE_ERR		((channel << 16) | (slotrank << 8) | 1)
+#define GET_ERR_CHANNEL(x)	(x >> 16)
 
 u8 get_CWL(u32 tCK);
 void dram_mrscommands(ramctr_timing *ctrl);
@@ -174,17 +173,14 @@
 void write_controller_mr(ramctr_timing *ctrl);
 int channel_test(ramctr_timing *ctrl);
 void set_scrambling_seed(ramctr_timing *ctrl);
-void set_4f8c(void);
+void set_wmm_behavior(void);
 void prepare_training(ramctr_timing *ctrl);
-void set_4008c(ramctr_timing *ctrl);
+void set_read_write_timings(ramctr_timing *ctrl);
 void set_normal_operation(ramctr_timing *ctrl);
 void final_registers(ramctr_timing *ctrl);
 void restore_timings(ramctr_timing *ctrl);
 
-int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
-		int s3_resume, int me_uma_size);
-
-int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
-		int s3_resume, int me_uma_size);
+int try_init_dram_ddr3_snb(ramctr_timing *ctrl, int fast_boot, int s3_resume, int me_uma_size);
+int try_init_dram_ddr3_ivb(ramctr_timing *ctrl, int fast_boot, int s3_resume, int me_uma_size);
 
 #endif
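For reference, a minimal standalone sketch (not part of the patch) of the error-code packing behind MAKE_ERR and GET_ERR_CHANNEL above; the explicit-parameter macro names and the extra slotrank accessor are hypothetical.

#include <stdio.h>

/* Same packing as MAKE_ERR, but with explicit parameters (names are hypothetical) */
#define MAKE_TRAINING_ERR(channel, slotrank)	(((channel) << 16) | ((slotrank) << 8) | 1)
#define ERR_CHANNEL(x)				((x) >> 16)
#define ERR_SLOTRANK(x)				(((x) >> 8) & 0xff)

int main(void)
{
	const int err = MAKE_TRAINING_ERR(1, 3);	/* Failure on channel 1, slotrank 3 */

	printf("err = 0x%06x -> channel %d, slotrank %d\n",
	       err, ERR_CHANNEL(err), ERR_SLOTRANK(err));
	return 0;
}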
diff --git a/src/northbridge/intel/sandybridge/raminit_ivy.c b/src/northbridge/intel/sandybridge/raminit_ivy.c
index a992d9c..06d2382 100644
--- a/src/northbridge/intel/sandybridge/raminit_ivy.c
+++ b/src/northbridge/intel/sandybridge/raminit_ivy.c
@@ -19,12 +19,10 @@
 #include "raminit_native.h"
 #include "raminit_common.h"
 
-/* Frequency multiplier.  */
+/* Frequency multiplier */
 static u32 get_FRQ(u32 tCK, u8 base_freq)
 {
-	u32 FRQ;
-
-	FRQ = 256000 / (tCK * base_freq);
+	const u32 FRQ = 256000 / (tCK * base_freq);
 
 	if (base_freq == 100) {
 		if (FRQ > 12)
@@ -41,249 +39,181 @@
 	return FRQ;
 }
 
+/* Get REFI based on MC frequency, tREFI = 7.8 usec */
 static u32 get_REFI(u32 tCK, u8 base_freq)
 {
-	u32 refi;
-
 	if (base_freq == 100) {
-		/* Get REFI based on MCU frequency using the following rule:
-		 * tREFI = 7.8usec
-		 *         _________________________________________
-		 * FRQ  : | 7    | 8    | 9    | 10   | 11   | 12   |
-		 * REFI : | 5460 | 6240 | 7020 | 7800 | 8580 | 9360 |
-		 */
-		static const u32 frq_xs_map[] =
-		    { 5460, 6240, 7020, 7800, 8580, 9360 };
-		refi = frq_xs_map[get_FRQ(tCK, 100) - 7];
-	} else {
-		/* Get REFI based on MCU frequency using the following rule:
-		 * tREFI = 7.8usec
-		 *        ________________________________________________________
-		 * FRQ : | 3    | 4    | 5    | 6    | 7    | 8    | 9    | 10    |
-		 * REFI: | 3120 | 4160 | 5200 | 6240 | 7280 | 8320 | 9360 | 10400 |
-		 */
-		static const u32 frq_refi_map[] =
-		    { 3120, 4160, 5200, 6240, 7280, 8320, 9360, 10400 };
-		refi = frq_refi_map[get_FRQ(tCK, 133) - 3];
-	}
+		static const u32 frq_xs_map[] = {
+		/* FRQ:    7,     8,     9,    10,    11,    12, */
+			5460,  6240,  7020,  7800,  8580,  9360,
+		};
+		return frq_xs_map[get_FRQ(tCK, 100) - 7];
 
-	return refi;
+	} else {
+		static const u32 frq_refi_map[] = {
+		/* FRQ:    3,     4,     5,     6,     7,     8,     9,    10, */
+			3120,  4160,  5200,  6240,  7280,  8320,  9360, 10400,
+		};
+		return frq_refi_map[get_FRQ(tCK, 133) - 3];
+	}
 }
 
+/* Get XSOffset based on MC frequency, tXS-Offset: tXS = tRFC + 10ns */
 static u8 get_XSOffset(u32 tCK, u8 base_freq)
 {
-	u8 xsoffset;
-
 	if (base_freq == 100) {
-		/* Get XSOffset based on MCU frequency using the following rule:
-		 * tXS-offset: tXS = tRFC+10ns.
-		 *             _____________________________
-		 * FRQ      : | 7  | 8  | 9  | 10 | 11 | 12 |
-		 * XSOffset : | 7  | 8  | 9  | 10 | 11 | 12 |
-		 */
-		static const u8 frq_xs_map[] = { 7, 8, 9, 10, 11, 12 };
-		xsoffset = frq_xs_map[get_FRQ(tCK, 100) - 7];
-	} else {
-		/* Get XSOffset based on MCU frequency using the following rule:
-		 *             ___________________________________
-		 * FRQ      : | 3 | 4 | 5 | 6 | 7  | 8  | 9  | 10 |
-		 * XSOffset : | 4 | 6 | 7 | 8 | 10 | 11 | 12 | 14 |
-		 */
-		static const u8 frq_xs_map[] = { 4, 6, 7, 8, 10, 11, 12, 14 };
-		xsoffset = frq_xs_map[get_FRQ(tCK, 133) - 3];
-	}
+		static const u8 frq_xs_map[] = {
+		/* FRQ: 7,  8,  9, 10, 11, 12, */
+			7,  8,  9, 10, 11, 12,
+		};
+		return frq_xs_map[get_FRQ(tCK, 100) - 7];
 
-	return xsoffset;
+	} else {
+		static const u8 frq_xs_map[] = {
+		/* FRQ: 3,  4,  5,  6,  7,  8,  9, 10, */
+			4,  6,  7,  8, 10, 11, 12, 14,
+		};
+		return frq_xs_map[get_FRQ(tCK, 133) - 3];
+	}
 }
 
+/* Get MOD based on MC frequency */
 static u8 get_MOD(u32 tCK, u8 base_freq)
 {
-	u8 mod;
-
 	if (base_freq == 100) {
-		/* Get MOD based on MCU frequency using the following rule:
-		 *        _____________________________
-		 * FRQ : | 7  | 8  | 9  | 10 | 11 | 12 |
-		 * MOD : | 12 | 12 | 14 | 15 | 17 | 18 |
-		 */
+		static const u8 frq_mod_map[] = {
+		/* FRQ:  7,  8,  9, 10, 11, 12, */
+			12, 12, 14, 15, 17, 18,
+		};
+		return frq_mod_map[get_FRQ(tCK, 100) - 7];
 
-		static const u8 frq_mod_map[] = { 12, 12, 14, 15, 17, 18 };
-		mod = frq_mod_map[get_FRQ(tCK, 100) - 7];
 	} else {
-		/* Get MOD based on MCU frequency using the following rule:
-		 *        _______________________________________
-		 * FRQ : | 3  | 4  | 5  | 6  | 7  | 8  | 9  | 10 |
-		 * MOD : | 12 | 12 | 12 | 12 | 15 | 16 | 18 | 20 |
-		 */
-
-		static const u8 frq_mod_map[] = { 12, 12, 12, 12, 15, 16, 18, 20 };
-		mod = frq_mod_map[get_FRQ(tCK, 133) - 3];
+		static const u8 frq_mod_map[] = {
+		/* FRQ:  3,  4,  5,  6,  7,  8,  9, 10, */
+			12, 12, 12, 12, 15, 16, 18, 20,
+		};
+		return frq_mod_map[get_FRQ(tCK, 133) - 3];
 	}
-	return mod;
 }
 
+/* Get Write Leveling Output delay based on MC frequency */
 static u8 get_WLO(u32 tCK, u8 base_freq)
 {
-	u8 wlo;
-
 	if (base_freq == 100) {
-		/* Get WLO based on MCU frequency using the following rule:
-		 * Write leveling output delay
-		 *        _____________________________
-		 * FRQ : | 7  | 8  | 9  | 10 | 11 | 12 |
-		 * MOD : | 6  | 6  | 7  | 8  | 9  | 9  |
-		 */
+		static const u8 frq_wlo_map[] = {
+		/* FRQ: 7,  8,  9, 10, 11, 12, */
+			6,  6,  7,  8,  9,  9,
+		};
+		return frq_wlo_map[get_FRQ(tCK, 100) - 7];
 
-		static const u8 frq_wlo_map[] = { 6, 6, 7, 8, 9, 9 };
-		wlo = frq_wlo_map[get_FRQ(tCK, 100) - 7];
 	} else {
-		/* Get WLO based on MCU frequency using the following rule:
-		 * Write leveling output delay
-		 *        ________________________________
-		 * FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
-		 * WLO : | 4 | 5 | 6 | 6 | 8 | 8 | 9 | 10 |
-		 */
-		static const u8 frq_wlo_map[] = { 4, 5, 6, 6, 8, 8, 9, 10 };
-		wlo = frq_wlo_map[get_FRQ(tCK, 133) - 3];
+		static const u8 frq_wlo_map[] = {
+		/* FRQ: 3,  4,  5,  6,  7,  8,  9, 10, */
+			4,  5,  6,  6,  8,  8,  9, 10,
+		};
+		return frq_wlo_map[get_FRQ(tCK, 133) - 3];
 	}
-
-	return wlo;
 }
 
+/* Get CKE based on MC frequency */
 static u8 get_CKE(u32 tCK, u8 base_freq)
 {
-	u8 cke;
-
 	if (base_freq == 100) {
-		/* Get CKE based on MCU frequency using the following rule:
-		 *        _____________________________
-		 * FRQ : | 7  | 8  | 9  | 10 | 11 | 12 |
-		 * MOD : | 4  | 4  | 5  | 5  | 6  | 6  |
-		 */
+		static const u8 frq_cke_map[] = {
+		/* FRQ: 7,  8,  9, 10, 11, 12, */
+			4,  4,  5,  5,  6,  6,
+		};
+		return frq_cke_map[get_FRQ(tCK, 100) - 7];
 
-		static const u8 frq_cke_map[] = { 4, 4, 5, 5, 6, 6 };
-		cke = frq_cke_map[get_FRQ(tCK, 100) - 7];
 	} else {
-		/* Get CKE based on MCU frequency using the following rule:
-		 *        ________________________________
-		 * FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
-		 * WLO : | 3 | 3 | 4 | 4 | 5 | 6 | 6 | 7  |
-		 */
-		static const u8 frq_cke_map[] = { 3, 3, 4, 4, 5, 6, 6, 7 };
-		cke = frq_cke_map[get_FRQ(tCK, 133) - 3];
+		static const u8 frq_cke_map[] = {
+		/* FRQ: 3,  4,  5,  6,  7,  8,  9, 10, */
+			3,  3,  4,  4,  5,  6,  6,  7,
+		};
+		return frq_cke_map[get_FRQ(tCK, 133) - 3];
 	}
-
-	return cke;
 }
 
+/* Get XPDLL based on MC frequency */
 static u8 get_XPDLL(u32 tCK, u8 base_freq)
 {
-	u8 xpdll;
-
 	if (base_freq == 100) {
-		/* Get XPDLL based on MCU frequency using the following rule:
-		 *          _____________________________
-		 * FRQ   : | 7  | 8  | 9  | 10 | 11 | 12 |
-		 * XPDLL : | 17 | 20 | 22 | 24 | 27 | 32 |
-		 */
+		static const u8 frq_xpdll_map[] = {
+		/* FRQ:  7,  8,  9, 10, 11, 12, */
+			17, 20, 22, 24, 27, 32,
+		};
+		return frq_xpdll_map[get_FRQ(tCK, 100) - 7];
 
-		static const u8 frq_xpdll_map[] = { 17, 20, 22, 24, 27, 32 };
-		xpdll = frq_xpdll_map[get_FRQ(tCK, 100) - 7];
 	} else {
-		/* Get XPDLL based on MCU frequency using the following rule:
-		 *          _______________________________________
-		 * FRQ   : | 3  | 4  | 5  | 6  | 7  | 8  | 9  | 10 |
-		 * XPDLL : | 10 | 13 | 16 | 20 | 23 | 26 | 29 | 32 |
-		 */
-		static const u8 frq_xpdll_map[] = { 10, 13, 16, 20, 23, 26, 29, 32 };
-		xpdll = frq_xpdll_map[get_FRQ(tCK, 133) - 3];
+		static const u8 frq_xpdll_map[] = {
+		/* FRQ:  3,  4,  5,  6,  7,  8,  9, 10, */
+			10, 13, 16, 20, 23, 26, 29, 32,
+		};
+		return frq_xpdll_map[get_FRQ(tCK, 133) - 3];
 	}
-
-	return xpdll;
 }
 
+/* Get XP based on MC frequency */
 static u8 get_XP(u32 tCK, u8 base_freq)
 {
-	u8 xp;
-
 	if (base_freq == 100) {
-		/* Get XP based on MCU frequency using the following rule:
-		 *        _____________________________
-		 * FRQ : | 7  | 8  | 9  | 10 | 11 | 12 |
-		 * XP  : | 5  | 5  | 6  | 6  | 7  | 8  |
-		 */
-
-		static const u8 frq_xp_map[] = { 5, 5, 6, 6, 7, 8 };
-		xp = frq_xp_map[get_FRQ(tCK, 100) - 7];
+		static const u8 frq_xp_map[] = {
+		/* FRQ: 7,  8,  9, 10, 11, 12, */
+			5,  5,  6,  6,  7,  8,
+		};
+		return frq_xp_map[get_FRQ(tCK, 100) - 7];
 	} else {
-		/* Get XP based on MCU frequency using the following rule:
-		 *        _______________________________________
-		 * FRQ : | 3  | 4  | 5  | 6  | 7  | 8  | 9  | 10 |
-		 * XP  : | 3  | 4  | 4  | 5  | 6  | 7  | 8  | 8  |
-		 */
-		static const u8 frq_xp_map[] = { 3, 4, 4, 5, 6, 7, 8, 8 };
-		xp = frq_xp_map[get_FRQ(tCK, 133) - 3];
-	}
 
-	return xp;
+		static const u8 frq_xp_map[] = {
+		/* FRQ: 3,  4,  5,  6,  7,  8,  9, 10, */
+			3,  4,  4,  5,  6,  7,  8,  8,
+		};
+		return frq_xp_map[get_FRQ(tCK, 133) - 3];
+	}
 }
 
+/* Get AONPD based on MC frequency */
 static u8 get_AONPD(u32 tCK, u8 base_freq)
 {
-	u8 aonpd;
-
 	if (base_freq == 100) {
-		/* Get AONPD based on MCU frequency using the following rule:
-		 *          _____________________________
-		 * FRQ   : | 7  | 8  | 9  | 10 | 11 | 12 |
-		 * AONPD : | 6  | 8  | 8  | 9  | 10 | 11 |
-		 */
+		static const u8 frq_aonpd_map[] = {
+		/* FRQ: 7,  8,  9, 10, 11, 12, */
+			6,  8,  8,  9, 10, 11,
+		};
+		return frq_aonpd_map[get_FRQ(tCK, 100) - 7];
 
-		static const u8 frq_aonpd_map[] = { 6, 8, 8, 9, 10, 11 };
-		aonpd = frq_aonpd_map[get_FRQ(tCK, 100) - 7];
 	} else {
-		/* Get AONPD based on MCU frequency using the following rule:
-		 *          _______________________________________
-		 * FRQ   : | 3  | 4  | 5  | 6  | 7  | 8  | 9  | 10 |
-		 * AONPD : | 4  | 5  | 6  | 8  | 8  | 10 | 11 | 12 |
-		 */
-		static const u8 frq_aonpd_map[] = { 4, 5, 6, 8, 8, 10, 11, 12 };
-		aonpd = frq_aonpd_map[get_FRQ(tCK, 133) - 3];
+		static const u8 frq_aonpd_map[] = {
+		/* FRQ: 3,  4,  5,  6,  7,  8,  9, 10, */
+			4,  5,  6,  8,  8, 10, 11, 12,
+		};
+		return frq_aonpd_map[get_FRQ(tCK, 133) - 3];
 	}
-
-	return aonpd;
 }
 
+/* Get COMP2 based on MC frequency */
 static u32 get_COMP2(u32 tCK, u8 base_freq)
 {
-	u32 comp2;
-
 	if (base_freq == 100) {
-		/* Get COMP2 based on MCU frequency using the following rule:
-		 *          ______________________________________________________________
-		 * FRQ  : | 7        | 8        | 9        | 10       | 11      | 12      |
-		 * COMP : | CA8C264  | C6671E4  | C6671E4  | C446964  | C235924 | C235924 |
-		 */
-
-		static const u32 frq_comp2_map[] = { 0xCA8C264, 0xC6671E4, 0xC6671E4, 0xC446964, 0xC235924, 0xC235924 };
-		comp2 = frq_comp2_map[get_FRQ(tCK, 100) - 7];
-	} else {
-		/* Get COMP2 based on MCU frequency using the following rule:
-		 *        ________________________________________________________________________________
-		 * FRQ  : | 3       | 4       | 5       | 6       | 7       | 8       | 9       | 10      |
-		 * COMP : | D6FF5E4 | CEBDB64 | CA8C264 | C6671E4 | C446964 | C235924 | C235924 | C235924 |
-		 */
-		static const u32 frq_comp2_map[] = { 0xD6FF5E4, 0xCEBDB64, 0xCA8C264,
-			0xC6671E4, 0xC446964, 0xC235924, 0xC235924, 0xC235924
+		static const u32 frq_comp2_map[] = {
+		/* FRQ:          7,          8,          9,         10,         11,         12, */
+			0x0CA8C264, 0x0C6671E4, 0x0C6671E4, 0x0C446964, 0x0C235924, 0x0C235924,
 		};
-		comp2 = frq_comp2_map[get_FRQ(tCK, 133) - 3];
-	}
+		return frq_comp2_map[get_FRQ(tCK, 100) - 7];
 
-	return comp2;
+	} else {
+		static const u32 frq_comp2_map[] = {
+		/* FRQ:          3,          4,          5,          6, */
+			0x0D6FF5E4, 0x0CEBDB64, 0x0CA8C264, 0x0C6671E4,
+
+		/* FRQ:          7,          8,          9,         10, */
+			0x0C446964, 0x0C235924, 0x0C235924, 0x0C235924,
+		};
+		return frq_comp2_map[get_FRQ(tCK, 133) - 3];
+	}
 }
 
-static void ivb_normalize_tclk(ramctr_timing *ctrl,
-			bool ref_100mhz_support)
+static void ivb_normalize_tclk(ramctr_timing *ctrl, bool ref_100mhz_support)
 {
 	if (ctrl->tCK <= TCK_1200MHZ) {
 		ctrl->tCK = TCK_1200MHZ;
@@ -324,7 +254,7 @@
 	}
 
 	if (!ref_100mhz_support && ctrl->base_freq == 100) {
-		/* Skip unsupported frequency. */
+		/* Skip unsupported frequency */
 		ctrl->tCK++;
 		ivb_normalize_tclk(ctrl, ref_100mhz_support);
 	}
@@ -333,29 +263,31 @@
 static void find_cas_tck(ramctr_timing *ctrl)
 {
 	u8 val;
-	u32 val32;
 	u32 reg32;
 	u8 ref_100mhz_support;
 
-	/* 100 Mhz reference clock supported */
-	reg32 = pci_read_config32(PCI_DEV(0, 0, 0), CAPID0_B);
+	/* 100 MHz reference clock supported */
+	reg32 = pci_read_config32(HOST_BRIDGE, CAPID0_B);
 	ref_100mhz_support = !!((reg32 >> 21) & 0x7);
-	printk(BIOS_DEBUG, "100MHz reference clock support: %s\n",
-		   ref_100mhz_support ? "yes" : "no");
+	printk(BIOS_DEBUG, "100MHz reference clock support: %s\n", ref_100mhz_support ? "yes"
+										      : "no");
 
 	/* Find CAS latency */
 	while (1) {
-		/* Normalising tCK before computing clock could potentially
-		 * results in lower selected CAS, which is desired.
+		/*
+		 * Normalising tCK before computing clock could potentially
+		 * result in a lower selected CAS, which is desired.
 		 */
 		ivb_normalize_tclk(ctrl, ref_100mhz_support);
 		if (!(ctrl->tCK))
 			die("Couldn't find compatible clock / CAS settings\n");
+
 		val = DIV_ROUND_UP(ctrl->tAA, ctrl->tCK);
 		printk(BIOS_DEBUG, "Trying CAS %u, tCK %u.\n", val, ctrl->tCK);
 		for (; val <= MAX_CAS; val++)
 			if ((ctrl->cas_supported >> (val - MIN_CAS)) & 1)
 				break;
+
 		if (val == (MAX_CAS + 1)) {
 			ctrl->tCK++;
 			continue;
@@ -365,9 +297,7 @@
 		}
 	}
 
-	val32 = NS2MHZ_DIV256 / ctrl->tCK;
-	printk(BIOS_DEBUG, "Selected DRAM frequency: %u MHz\n", val32);
-
+	printk(BIOS_DEBUG, "Selected DRAM frequency: %u MHz\n", NS2MHZ_DIV256 / ctrl->tCK);
 	printk(BIOS_DEBUG, "Selected CAS latency   : %uT\n", val);
 	ctrl->CAS = val;
 }
@@ -375,9 +305,10 @@
 
 static void dram_timing(ramctr_timing *ctrl)
 {
-	/* Maximum supported DDR3 frequency is 1400MHz (DDR3 2800).
-	 * We cap it at 1200Mhz (DDR3 2400).
-	 * Then, align it to the closest JEDEC standard frequency */
+	/*
+	 * On Ivy Bridge, the maximum supported DDR3 frequency is 1400MHz (DDR3 2800).
+	 * Cap it at 1200MHz (DDR3 2400), and align it to the closest JEDEC standard frequency.
+	 */
 	if (ctrl->tCK == TCK_1200MHZ) {
 		ctrl->edge_offset[0] = 18; //XXX: guessed
 		ctrl->edge_offset[1] = 8;
@@ -386,6 +317,7 @@
 		ctrl->timC_offset[1] = 8;
 		ctrl->timC_offset[2] = 8;
 		ctrl->pi_coding_threshold = 10;
+
 	} else if (ctrl->tCK == TCK_1100MHZ) {
 		ctrl->edge_offset[0] = 17; //XXX: guessed
 		ctrl->edge_offset[1] = 7;
@@ -394,6 +326,7 @@
 		ctrl->timC_offset[1] = 7;
 		ctrl->timC_offset[2] = 7;
 		ctrl->pi_coding_threshold = 13;
+
 	} else if (ctrl->tCK == TCK_1066MHZ) {
 		ctrl->edge_offset[0] = 16;
 		ctrl->edge_offset[1] = 7;
@@ -402,6 +335,7 @@
 		ctrl->timC_offset[1] = 7;
 		ctrl->timC_offset[2] = 7;
 		ctrl->pi_coding_threshold = 13;
+
 	} else if (ctrl->tCK == TCK_1000MHZ) {
 		ctrl->edge_offset[0] = 15; //XXX: guessed
 		ctrl->edge_offset[1] = 6;
@@ -410,6 +344,7 @@
 		ctrl->timC_offset[1] = 6;
 		ctrl->timC_offset[2] = 6;
 		ctrl->pi_coding_threshold = 13;
+
 	} else if (ctrl->tCK == TCK_933MHZ) {
 		ctrl->edge_offset[0] = 14;
 		ctrl->edge_offset[1] = 6;
@@ -418,6 +353,7 @@
 		ctrl->timC_offset[1] = 6;
 		ctrl->timC_offset[2] = 6;
 		ctrl->pi_coding_threshold = 15;
+
 	} else if (ctrl->tCK == TCK_900MHZ) {
 		ctrl->edge_offset[0] = 14; //XXX: guessed
 		ctrl->edge_offset[1] = 6;
@@ -426,6 +362,7 @@
 		ctrl->timC_offset[1] = 6;
 		ctrl->timC_offset[2] = 6;
 		ctrl->pi_coding_threshold = 12;
+
 	} else if (ctrl->tCK == TCK_800MHZ) {
 		ctrl->edge_offset[0] = 13;
 		ctrl->edge_offset[1] = 5;
@@ -434,6 +371,7 @@
 		ctrl->timC_offset[1] = 5;
 		ctrl->timC_offset[2] = 5;
 		ctrl->pi_coding_threshold = 15;
+
 	} else if (ctrl->tCK == TCK_700MHZ) {
 		ctrl->edge_offset[0] = 13; //XXX: guessed
 		ctrl->edge_offset[1] = 5;
@@ -442,6 +380,7 @@
 		ctrl->timC_offset[1] = 5;
 		ctrl->timC_offset[2] = 5;
 		ctrl->pi_coding_threshold = 16;
+
 	} else if (ctrl->tCK == TCK_666MHZ) {
 		ctrl->edge_offset[0] = 10;
 		ctrl->edge_offset[1] = 4;
@@ -450,6 +389,7 @@
 		ctrl->timC_offset[1] = 4;
 		ctrl->timC_offset[2] = 4;
 		ctrl->pi_coding_threshold = 16;
+
 	} else if (ctrl->tCK == TCK_533MHZ) {
 		ctrl->edge_offset[0] = 8;
 		ctrl->edge_offset[1] = 3;
@@ -458,6 +398,7 @@
 		ctrl->timC_offset[1] = 3;
 		ctrl->timC_offset[2] = 3;
 		ctrl->pi_coding_threshold = 17;
+
 	} else  { /* TCK_400MHZ */
 		ctrl->edge_offset[0] = 6;
 		ctrl->edge_offset[1] = 2;
@@ -478,13 +419,14 @@
 		ctrl->CWL = DIV_ROUND_UP(ctrl->tCWL, ctrl->tCK);
 	else
 		ctrl->CWL = get_CWL(ctrl->tCK);
+
 	printk(BIOS_DEBUG, "Selected CWL latency   : %uT\n", ctrl->CWL);
 
 	/* Find tRCD */
 	ctrl->tRCD = DIV_ROUND_UP(ctrl->tRCD, ctrl->tCK);
 	printk(BIOS_DEBUG, "Selected tRCD          : %uT\n", ctrl->tRCD);
 
-	ctrl->tRP = DIV_ROUND_UP(ctrl->tRP, ctrl->tCK);
+	ctrl->tRP  = DIV_ROUND_UP(ctrl->tRP,  ctrl->tCK);
 	printk(BIOS_DEBUG, "Selected tRP           : %uT\n", ctrl->tRP);
 
 	/* Find tRAS */
@@ -492,7 +434,7 @@
 	printk(BIOS_DEBUG, "Selected tRAS          : %uT\n", ctrl->tRAS);
 
 	/* Find tWR */
-	ctrl->tWR = DIV_ROUND_UP(ctrl->tWR, ctrl->tCK);
+	ctrl->tWR  = DIV_ROUND_UP(ctrl->tWR,  ctrl->tCK);
 	printk(BIOS_DEBUG, "Selected tWR           : %uT\n", ctrl->tWR);
 
 	/* Find tFAW */
@@ -515,21 +457,22 @@
 	ctrl->tRFC = DIV_ROUND_UP(ctrl->tRFC, ctrl->tCK);
 	printk(BIOS_DEBUG, "Selected tRFC          : %uT\n", ctrl->tRFC);
 
-	ctrl->tREFI = get_REFI(ctrl->tCK, ctrl->base_freq);
-	ctrl->tMOD = get_MOD(ctrl->tCK, ctrl->base_freq);
+	ctrl->tREFI     =     get_REFI(ctrl->tCK, ctrl->base_freq);
+	ctrl->tMOD      =      get_MOD(ctrl->tCK, ctrl->base_freq);
 	ctrl->tXSOffset = get_XSOffset(ctrl->tCK, ctrl->base_freq);
-	ctrl->tWLO = get_WLO(ctrl->tCK, ctrl->base_freq);
-	ctrl->tCKE = get_CKE(ctrl->tCK, ctrl->base_freq);
-	ctrl->tXPDLL = get_XPDLL(ctrl->tCK, ctrl->base_freq);
-	ctrl->tXP = get_XP(ctrl->tCK, ctrl->base_freq);
-	ctrl->tAONPD = get_AONPD(ctrl->tCK, ctrl->base_freq);
+	ctrl->tWLO      =      get_WLO(ctrl->tCK, ctrl->base_freq);
+	ctrl->tCKE      =      get_CKE(ctrl->tCK, ctrl->base_freq);
+	ctrl->tXPDLL    =    get_XPDLL(ctrl->tCK, ctrl->base_freq);
+	ctrl->tXP       =       get_XP(ctrl->tCK, ctrl->base_freq);
+	ctrl->tAONPD    =    get_AONPD(ctrl->tCK, ctrl->base_freq);
 }
 
 static void dram_freq(ramctr_timing *ctrl)
 {
 	if (ctrl->tCK > TCK_400MHZ) {
-		printk (BIOS_ERR, "DRAM frequency is under lowest supported "
-				"frequency (400 MHz). Increasing to 400 MHz as last resort");
+		printk(BIOS_ERR,
+			"DRAM frequency is below the lowest supported frequency (400 MHz). "
+			"Increasing to 400 MHz as a last resort.\n");
 		ctrl->tCK = TCK_400MHZ;
 	}
 
@@ -540,11 +483,12 @@
 		/* Step 1 - Set target PCU frequency */
 		find_cas_tck(ctrl);
 
-		/* Frequency multiplier.  */
-		u32 FRQ = get_FRQ(ctrl->tCK, ctrl->base_freq);
+		/* Frequency multiplier */
+		const u32 FRQ = get_FRQ(ctrl->tCK, ctrl->base_freq);
 
-		/* The PLL will never lock if the required frequency is
-		 * already set. Exit early to prevent a system hang.
+		/*
+		 * The PLL will never lock if the required frequency is already set.
+		 * Exit early to prevent a system hang.
 		 */
 		reg1 = MCHBAR32(MC_BIOS_DATA);
 		val2 = (u8) reg1;
@@ -554,10 +498,11 @@
 		/* Step 2 - Select frequency in the MCU */
 		reg1 = FRQ;
 		if (ctrl->base_freq == 100)
-			reg1 |= 0x100; /* Enable 100Mhz REF clock */
-		reg1 |= 0x80000000;	// set running bit
+			reg1 |= 0x100;	/* Enable 100 MHz REF clock */
+
+		reg1 |= 0x80000000;	/* Set running bit */
 		MCHBAR32(MC_BIOS_REQ) = reg1;
-		int i=0;
+		int i = 0;
 		printk(BIOS_DEBUG, "PLL busy... ");
 		while (reg1 & 0x80000000) {
 			udelay(10);
@@ -581,61 +526,57 @@
 
 static void dram_ioregs(ramctr_timing *ctrl)
 {
-	u32 reg, comp2;
+	u32 reg;
 
 	int channel;
 
-	// IO clock
+	/* IO clock */
 	FOR_ALL_CHANNELS {
 		MCHBAR32(GDCRCLKRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
 	}
 
-	// IO command
+	/* IO command */
 	FOR_ALL_CHANNELS {
 		MCHBAR32(GDCRCTLRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
 	}
 
-	// IO control
+	/* IO control */
 	FOR_ALL_POPULATED_CHANNELS {
 		program_timings(ctrl, channel);
 	}
 
-	// Rcomp
+	/* Perform RCOMP */
 	printram("RCOMP...");
-	reg = 0;
-	while (reg == 0) {
-		reg = MCHBAR32(RCOMP_TIMER) & 0x10000;
-	}
+	while (!(MCHBAR32(RCOMP_TIMER) & (1 << 16)))
+		;
+
 	printram("done\n");
 
-	// Set comp2
-	comp2 = get_COMP2(ctrl->tCK, ctrl->base_freq);
-	MCHBAR32(CRCOMPOFST2) = comp2;
+	/* Set COMP2 */
+	MCHBAR32(CRCOMPOFST2) = get_COMP2(ctrl->tCK, ctrl->base_freq);
 	printram("COMP2 done\n");
 
-	// Set comp1
+	/* Set COMP1 */
 	FOR_ALL_POPULATED_CHANNELS {
-		reg = MCHBAR32(CRCOMPOFST1_ch(channel));	//ch0
-		reg = (reg & ~0xe00) | (1 << 9);	//odt
-		reg = (reg & ~0xe00000) | (1 << 21);	//clk drive up
-		reg = (reg & ~0x38000000) | (1 << 27);	//ctl drive up
+		reg = MCHBAR32(CRCOMPOFST1_ch(channel));
+		reg = (reg & ~0x00000e00) | (1 <<  9);	/* ODT */
+		reg = (reg & ~0x00e00000) | (1 << 21);	/* clk drive up */
+		reg = (reg & ~0x38000000) | (1 << 27);	/* ctl drive up */
 		MCHBAR32(CRCOMPOFST1_ch(channel)) = reg;
 	}
 	printram("COMP1 done\n");
 
 	printram("FORCE RCOMP and wait 20us...");
-	MCHBAR32(M_COMP) |= 0x100;
+	MCHBAR32(M_COMP) |= (1 << 8);
 	udelay(20);
 	printram("done\n");
 }
 
-int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
-		int s3_resume, int me_uma_size)
+int try_init_dram_ddr3_ivb(ramctr_timing *ctrl, int fast_boot, int s3_resume, int me_uma_size)
 {
 	int err;
 
-	printk(BIOS_DEBUG, "Starting Ivybridge RAM training (%d).\n",
-		   fast_boot);
+	printk(BIOS_DEBUG, "Starting Ivybridge RAM training (%d).\n", fast_boot);
 
 	if (!fast_boot) {
 		/* Find fastest common supported parameters */
@@ -644,7 +585,7 @@
 		dram_dimm_mapping(ctrl);
 	}
 
-	/* Set MCU frequency */
+	/* Set MC frequency */
 	dram_freq(ctrl);
 
 	if (!fast_boot) {
@@ -653,7 +594,7 @@
 	}
 
 	/* Set version register */
-	MCHBAR32(MRC_REVISION) = 0xC04EB002;
+	MCHBAR32(MRC_REVISION) = 0xc04eb002;
 
 	/* Enable crossover */
 	dram_xover(ctrl);
@@ -667,11 +608,11 @@
 	/* Set scheduler chicken bits */
 	MCHBAR32(SCHED_CBIT) = 0x10100005;
 
-	/* Set CPU specific register */
-	set_4f8c();
+	/* Set up watermarks and starvation counter */
+	set_wmm_behavior();
 
 	/* Clear IO reset bit */
-	MCHBAR32(MC_INIT_STATE_G) &= ~0x20;
+	MCHBAR32(MC_INIT_STATE_G) &= ~(1 << 5);
 
 	/* Set MAD-DIMM registers */
 	dram_dimm_set_mapping(ctrl);
@@ -693,7 +634,7 @@
 	if (fast_boot) {
 		restore_timings(ctrl);
 	} else {
-		/* Do jedec ddr3 reset sequence */
+		/* Do JEDEC DDR3 reset sequence */
 		dram_jedecreset(ctrl);
 		printk(BIOS_DEBUG, "Done jedec reset\n");
 
@@ -737,7 +678,7 @@
 		normalize_training(ctrl);
 	}
 
-	set_4008c(ctrl);
+	set_read_write_timings(ctrl);
 
 	write_controller_mr(ctrl);
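For reference, a minimal standalone sketch (not part of the patch) of the FRQ arithmetic used by get_FRQ() above: tCK is in nanoseconds scaled by 256 and base_freq is the 100/133 MHz reference clock; the tCK values below are examples and the clamping limits are omitted.

#include <stdio.h>

static unsigned int frq_from_tck(unsigned int tCK, unsigned int base_freq)
{
	/* tCK is in ns scaled by 256, base_freq in MHz: FRQ = 256000 / (tCK * base_freq) */
	return 256000 / (tCK * base_freq);
}

int main(void)
{
	const unsigned int tck_800mhz = 320;	/* DDR3-1600: tCK = 1.25 ns, scaled by 256 */
	const unsigned int tck_666mhz = 384;	/* DDR3-1333: tCK = 1.50 ns, scaled by 256 */

	printf("133 MHz ref: FRQ = %u and %u\n",
	       frq_from_tck(tck_800mhz, 133), frq_from_tck(tck_666mhz, 133));
	printf("100 MHz ref: FRQ = %u\n", frq_from_tck(tck_800mhz, 100));
	return 0;
}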
 
diff --git a/src/northbridge/intel/sandybridge/raminit_mrc.c b/src/northbridge/intel/sandybridge/raminit_mrc.c
index 9e07e2e..5b4b46c 100644
--- a/src/northbridge/intel/sandybridge/raminit_mrc.c
+++ b/src/northbridge/intel/sandybridge/raminit_mrc.c
@@ -44,8 +44,8 @@
  */
 #if CONFIG(USE_OPTION_TABLE)
 #include "option_table.h"
-#define CMOS_OFFSET_MRC_SEED     (CMOS_VSTART_mrc_scrambler_seed >> 3)
-#define CMOS_OFFSET_MRC_SEED_S3  (CMOS_VSTART_mrc_scrambler_seed_s3 >> 3)
+#define CMOS_OFFSET_MRC_SEED     (CMOS_VSTART_mrc_scrambler_seed     >> 3)
+#define CMOS_OFFSET_MRC_SEED_S3  (CMOS_VSTART_mrc_scrambler_seed_s3  >> 3)
 #define CMOS_OFFSET_MRC_SEED_CHK (CMOS_VSTART_mrc_scrambler_seed_chk >> 3)
 #else
 #define CMOS_OFFSET_MRC_SEED     152
@@ -60,8 +60,7 @@
 	u16 c1, c2, checksum;
 
 	/* Save the MRC S3 restore data to cbmem */
-	mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION,
-			pei_data->mrc_output,
+	mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION, pei_data->mrc_output,
 			pei_data->mrc_output_len);
 
 	/* Save the MRC seed values to CMOS */
@@ -74,14 +73,12 @@
 	       pei_data->scrambler_seed_s3, CMOS_OFFSET_MRC_SEED_S3);
 
 	/* Save a simple checksum of the seed values */
-	c1 = compute_ip_checksum((u8*)&pei_data->scrambler_seed,
-				 sizeof(u32));
-	c2 = compute_ip_checksum((u8*)&pei_data->scrambler_seed_s3,
-				 sizeof(u32));
+	c1 = compute_ip_checksum((u8 *)&pei_data->scrambler_seed,    sizeof(u32));
+	c2 = compute_ip_checksum((u8 *)&pei_data->scrambler_seed_s3, sizeof(u32));
 	checksum = add_ip_checksums(sizeof(u32), c1, c2);
 
-	cmos_write(checksum & 0xff, CMOS_OFFSET_MRC_SEED_CHK);
-	cmos_write((checksum >> 8) & 0xff, CMOS_OFFSET_MRC_SEED_CHK+1);
+	cmos_write((checksum >> 0) & 0xff, CMOS_OFFSET_MRC_SEED_CHK);
+	cmos_write((checksum >> 8) & 0xff, CMOS_OFFSET_MRC_SEED_CHK + 1);
 }
 
 static void prepare_mrc_cache(struct pei_data *pei_data)
@@ -89,7 +86,7 @@
 	struct region_device rdev;
 	u16 c1, c2, checksum, seed_checksum;
 
-	// preset just in case there is an error
+	/* Preset just in case there is an error */
 	pei_data->mrc_input = NULL;
 	pei_data->mrc_input_len = 0;
 
@@ -103,14 +100,12 @@
 	       pei_data->scrambler_seed_s3, CMOS_OFFSET_MRC_SEED_S3);
 
 	/* Compute seed checksum and compare */
-	c1 = compute_ip_checksum((u8*)&pei_data->scrambler_seed,
-				 sizeof(u32));
-	c2 = compute_ip_checksum((u8*)&pei_data->scrambler_seed_s3,
-				 sizeof(u32));
+	c1 = compute_ip_checksum((u8 *)&pei_data->scrambler_seed,    sizeof(u32));
+	c2 = compute_ip_checksum((u8 *)&pei_data->scrambler_seed_s3, sizeof(u32));
 	checksum = add_ip_checksums(sizeof(u32), c1, c2);
 
-	seed_checksum = cmos_read(CMOS_OFFSET_MRC_SEED_CHK);
-	seed_checksum |= cmos_read(CMOS_OFFSET_MRC_SEED_CHK+1) << 8;
+	seed_checksum  = cmos_read(CMOS_OFFSET_MRC_SEED_CHK);
+	seed_checksum |= cmos_read(CMOS_OFFSET_MRC_SEED_CHK + 1) << 8;
 
 	if (checksum != seed_checksum) {
 		printk(BIOS_ERR, "%s: invalid seed checksum\n", __func__);
@@ -119,68 +114,64 @@
 		return;
 	}
 
-	if (mrc_cache_get_current(MRC_TRAINING_DATA, MRC_CACHE_VERSION,
-					&rdev)) {
-		/* error message printed in find_current_mrc_cache */
+	if (mrc_cache_get_current(MRC_TRAINING_DATA, MRC_CACHE_VERSION, &rdev)) {
+		/* Error message printed in find_current_mrc_cache */
 		return;
 	}
 
 	pei_data->mrc_input = rdev_mmap_full(&rdev);
 	pei_data->mrc_input_len = region_device_sz(&rdev);
 
-	printk(BIOS_DEBUG, "%s: at %p, size %x\n",
-	       __func__, pei_data->mrc_input, pei_data->mrc_input_len);
+	printk(BIOS_DEBUG, "%s: at %p, size %x\n", __func__, pei_data->mrc_input,
+			pei_data->mrc_input_len);
 }
 
 static const char *ecc_decoder[] = {
 	"inactive",
 	"active on IO",
 	"disabled on IO",
-	"active"
+	"active",
 };
 
-/*
- * Dump in the log memory controller configuration as read from the memory
- * controller registers.
- */
+#define ON_OFF(val) (((val) & 1) ? "on" : "off")
+
+/* Print the memory controller configuration as read from its registers */
 static void report_memory_config(void)
 {
 	u32 addr_decoder_common, addr_decode_ch[2];
 	int i;
 
 	addr_decoder_common = MCHBAR32(MAD_CHNL);
-	addr_decode_ch[0] = MCHBAR32(MAD_DIMM_CH0);
-	addr_decode_ch[1] = MCHBAR32(MAD_DIMM_CH1);
+	addr_decode_ch[0]   = MCHBAR32(MAD_DIMM_CH0);
+	addr_decode_ch[1]   = MCHBAR32(MAD_DIMM_CH1);
 
 	printk(BIOS_DEBUG, "memcfg DDR3 clock %d MHz\n",
-	       (MCHBAR32(MC_BIOS_DATA) * 13333 * 2 + 50)/100);
+	       (MCHBAR32(MC_BIOS_DATA) * 13333 * 2 + 50) / 100);
+
 	printk(BIOS_DEBUG, "memcfg channel assignment: A: %d, B % d, C % d\n",
-	       addr_decoder_common & 3,
+	       (addr_decoder_common >> 0) & 3,
 	       (addr_decoder_common >> 2) & 3,
 	       (addr_decoder_common >> 4) & 3);
 
 	for (i = 0; i < ARRAY_SIZE(addr_decode_ch); i++) {
 		u32 ch_conf = addr_decode_ch[i];
-		printk(BIOS_DEBUG, "memcfg channel[%d] config (%8.8x):\n",
-		       i, ch_conf);
-		printk(BIOS_DEBUG, "   ECC %s\n",
-		       ecc_decoder[(ch_conf >> 24) & 3]);
-		printk(BIOS_DEBUG, "   enhanced interleave mode %s\n",
-		       ((ch_conf >> 22) & 1) ? "on" : "off");
-		printk(BIOS_DEBUG, "   rank interleave %s\n",
-		       ((ch_conf >> 21) & 1) ? "on" : "off");
+		printk(BIOS_DEBUG, "memcfg channel[%d] config (%8.8x):\n", i, ch_conf);
+		printk(BIOS_DEBUG, "   ECC %s\n", ecc_decoder[(ch_conf >> 24) & 3]);
+		printk(BIOS_DEBUG, "   enhanced interleave mode %s\n", ON_OFF(ch_conf >> 22));
+		printk(BIOS_DEBUG, "   rank interleave %s\n", ON_OFF(ch_conf >> 21));
 		printk(BIOS_DEBUG, "   DIMMA %d MB width x%d %s rank%s\n",
-		       ((ch_conf >> 0) & 0xff) * 256,
+		       ((ch_conf >>  0) & 0xff) * 256,
 		       ((ch_conf >> 19) & 1) ? 16 : 8,
 		       ((ch_conf >> 17) & 1) ? "dual" : "single",
 		       ((ch_conf >> 16) & 1) ? "" : ", selected");
 		printk(BIOS_DEBUG, "   DIMMB %d MB width x%d %s rank%s\n",
-		       ((ch_conf >> 8) & 0xff) * 256,
+		       ((ch_conf >>  8) & 0xff) * 256,
 		       ((ch_conf >> 20) & 1) ? 16 : 8,
 		       ((ch_conf >> 18) & 1) ? "dual" : "single",
 		       ((ch_conf >> 16) & 1) ? ", selected" : "");
 	}
 }
+#undef ON_OFF
 
 /**
  * Find PEI executable in coreboot filesystem and execute it.
@@ -190,7 +181,7 @@
 void sdram_initialize(struct pei_data *pei_data)
 {
 	struct sys_info sysinfo;
-	int (*entry) (struct pei_data *pei_data) __attribute__((regparm(1)));
+	int (*entry)(struct pei_data *pei_data) __attribute__((regparm(1)));
 
 	/* Wait for ME to be ready */
 	intel_early_me_init();
@@ -245,18 +236,17 @@
 	if (CONFIG(USBDEBUG_IN_PRE_RAM))
 		usbdebug_hw_init(true);
 
-	/* For reference print the System Agent version
-	 * after executing the UEFI PEI stage.
-	 */
+	/* For reference, print the System Agent version after executing the UEFI PEI stage */
 	u32 version = MCHBAR32(MRC_REVISION);
 	printk(BIOS_DEBUG, "System Agent Version %d.%d.%d Build %d\n",
-		version >> 24, (version >> 16) & 0xff,
-		(version >> 8) & 0xff, version & 0xff);
+		(version >> 24) & 0xff, (version >> 16) & 0xff,
+		(version >>  8) & 0xff, (version >>  0) & 0xff);
 
-	/* Send ME init done for SandyBridge here.  This is done
-	 * inside the SystemAgent binary on IvyBridge. */
-	if (BASE_REV_SNB ==
-	    (pci_read_config16(PCI_CPU_DEVICE, PCI_DEVICE_ID) & BASE_REV_MASK))
+	/*
+	 * Send ME init done for SandyBridge here.
+	 * This is done inside the SystemAgent binary on IvyBridge.
+	 */
+	if (BASE_REV_SNB == (pci_read_config16(PCI_CPU_DEVICE, PCI_DEVICE_ID) & BASE_REV_MASK))
 		intel_early_me_init_done(ME_INIT_STATUS_SUCCESS);
 	else
 		intel_early_me_status();
@@ -264,31 +254,30 @@
 	report_memory_config();
 }
 
-/* These are the location and structure of MRC_VAR data in CAR.
-   The CAR region looks like this:
-   +------------------+ -> DCACHE_RAM_BASE
-   |                  |
-   |                  |
-   |  COREBOOT STACK  |
-   |                  |
-   |                  |
-   +------------------+ -> DCACHE_RAM_BASE + DCACHE_RAM_SIZE
-   |                  |
-   |  MRC HEAP        |
-   |  size = 0x5000   |
-   |                  |
-   +------------------+
-   |                  |
-   |  MRC VAR         |
-   |  size = 0x4000   |
-   |                  |
-   +------------------+ -> DACHE_RAM_BASE + DACHE_RAM_SIZE
-                                 + DCACHE_RAM_MRC_VAR_SIZE
-
+/*
+ * These are the location and structure of MRC_VAR data in CAR.
+ * The CAR region looks like this:
+ * +------------------+ -> DCACHE_RAM_BASE
+ * |                  |
+ * |                  |
+ * |  COREBOOT STACK  |
+ * |                  |
+ * |                  |
+ * +------------------+ -> DCACHE_RAM_BASE + DCACHE_RAM_SIZE
+ * |                  |
+ * |  MRC HEAP        |
+ * |  size = 0x5000   |
+ * |                  |
+ * +------------------+
+ * |                  |
+ * |  MRC VAR         |
+ * |  size = 0x4000   |
+ * |                  |
+ * +------------------+ -> DCACHE_RAM_BASE + DCACHE_RAM_SIZE
+ *                               + DCACHE_RAM_MRC_VAR_SIZE
  */
-#define DCACHE_RAM_MRC_VAR_BASE \
-	(CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE + \
-	 CONFIG_DCACHE_RAM_MRC_VAR_SIZE - 0x4000)
+#define DCACHE_RAM_MRC_VAR_BASE	 (CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE \
+				+ CONFIG_DCACHE_RAM_MRC_VAR_SIZE - 0x4000)
 
 struct mrc_var_data {
 	u32 acpi_timer_flag;
@@ -300,14 +289,14 @@
 
 static void northbridge_fill_pei_data(struct pei_data *pei_data)
 {
-	pei_data->mchbar = (uintptr_t)DEFAULT_MCHBAR;
-	pei_data->dmibar = (uintptr_t)DEFAULT_DMIBAR;
-	pei_data->epbar = DEFAULT_EPBAR;
-	pei_data->pciexbar = CONFIG_MMCONF_BASE_ADDRESS;
+	pei_data->mchbar       = (uintptr_t)DEFAULT_MCHBAR;
+	pei_data->dmibar       = (uintptr_t)DEFAULT_DMIBAR;
+	pei_data->epbar        = DEFAULT_EPBAR;
+	pei_data->pciexbar     = CONFIG_MMCONF_BASE_ADDRESS;
 	pei_data->hpet_address = CONFIG_HPET_ADDRESS;
-	pei_data->thermalbase = 0xfed08000;
-	pei_data->system_type = get_platform_type() == PLATFORM_MOBILE ? 0 : 1;
-	pei_data->tseg_size = CONFIG_SMM_TSEG_SIZE;
+	pei_data->thermalbase  = 0xfed08000;
+	pei_data->system_type  = !(get_platform_type() == PLATFORM_MOBILE);
+	pei_data->tseg_size    = CONFIG_SMM_TSEG_SIZE;
 
 	if ((cpu_get_cpuid() & 0xffff0) == 0x306a0) {
 		const struct device *dev = pcidev_on_root(1, 0);
@@ -321,12 +310,12 @@
 {
 	const struct device *dev = pcidev_on_root(0x19, 0);
 
-	pei_data->smbusbar = SMBUS_IO_BASE;
-	pei_data->wdbbar = 0x4000000;
-	pei_data->wdbsize = 0x1000;
-	pei_data->rcba = (uintptr_t)DEFAULT_RCBABASE;
-	pei_data->pmbase = DEFAULT_PMBASE;
-	pei_data->gpiobase = DEFAULT_GPIOBASE;
+	pei_data->smbusbar   = SMBUS_IO_BASE;
+	pei_data->wdbbar     = 0x04000000;
+	pei_data->wdbsize    = 0x1000;
+	pei_data->rcba       = (uintptr_t)DEFAULT_RCBABASE;
+	pei_data->pmbase     = DEFAULT_PMBASE;
+	pei_data->gpiobase   = DEFAULT_GPIOBASE;
 	pei_data->gbe_enable = dev && dev->enabled;
 }
 
@@ -360,13 +349,10 @@
 
 	}
 
-	memcpy(pei_data->spd_addresses, cfg->spd_addresses,
-	       sizeof(pei_data->spd_addresses));
+	memcpy(pei_data->spd_addresses, cfg->spd_addresses, sizeof(pei_data->spd_addresses));
+	memcpy(pei_data->ts_addresses,  cfg->ts_addresses,  sizeof(pei_data->ts_addresses));
 
-	memcpy(pei_data->ts_addresses, cfg->ts_addresses,
-	       sizeof(pei_data->ts_addresses));
-
-	pei_data->ec_present = cfg->ec_present;
+	pei_data->ec_present     = cfg->ec_present;
 	pei_data->ddr3lv_support = cfg->ddr3lv_support;
 
 	pei_data->nmode = cfg->nmode;
@@ -375,15 +361,15 @@
 	memcpy(pei_data->usb_port_config, cfg->usb_port_config,
 	       sizeof(pei_data->usb_port_config));
 
-	pei_data->usb3.mode = cfg->usb3.mode;
+	pei_data->usb3.mode                = cfg->usb3.mode;
 	pei_data->usb3.hs_port_switch_mask = cfg->usb3.hs_port_switch_mask;
-	pei_data->usb3.preboot_support = cfg->usb3.preboot_support;
-	pei_data->usb3.xhci_streams = cfg->usb3.xhci_streams;
+	pei_data->usb3.preboot_support     = cfg->usb3.preboot_support;
+	pei_data->usb3.xhci_streams        = cfg->usb3.xhci_streams;
 }
 
 static void disable_p2p(void)
 {
-	/* Disable PCI-to-PCI bridge early to prevent probing by MRC. */
+	/* Disable PCI-to-PCI bridge early to prevent probing by MRC */
 	const struct device *const p2p = pcidev_on_root(0x1e, 0);
 	if (p2p && p2p->enabled)
 		return;
@@ -393,7 +379,6 @@
 
 void perform_raminit(int s3resume)
 {
-	int cbmem_was_initted;
 	struct pei_data pei_data;
 	struct mrc_var_data *mrc_var;
 
@@ -425,6 +410,7 @@
 		if (pei_data.spd_data[i][0] && !pei_data.spd_data[0][0]) {
 			memcpy(pei_data.spd_data[0], pei_data.spd_data[i],
 			       sizeof(pei_data.spd_data[0]));
+
 		} else if (pei_data.spd_data[i][0] && pei_data.spd_data[0][0]) {
 			if (memcmp(pei_data.spd_data[i], pei_data.spd_data[0],
 			    sizeof(pei_data.spd_data[0])) != 0)
@@ -438,18 +424,18 @@
 	timestamp_add_now(TS_BEFORE_INITRAM);
 	sdram_initialize(&pei_data);
 
+	/* Sanity check mrc_var location by verifying a known field */
 	mrc_var = (void *)DCACHE_RAM_MRC_VAR_BASE;
-	/* Sanity check mrc_var location by verifying a known field. */
 	if (mrc_var->tx_byte == (uintptr_t)pei_data.tx_byte) {
 		printk(BIOS_DEBUG, "MRC_VAR pool occupied [%08x,%08x]\n",
-		       mrc_var->pool_base,
-		       mrc_var->pool_base + mrc_var->pool_used);
+		       mrc_var->pool_base, mrc_var->pool_base + mrc_var->pool_used);
+
 	} else {
 		printk(BIOS_ERR, "Could not parse MRC_VAR data\n");
-		hexdump32(BIOS_ERR, mrc_var, sizeof(*mrc_var)/sizeof(u32));
+		hexdump32(BIOS_ERR, mrc_var, sizeof(*mrc_var) / sizeof(u32));
 	}
 
-	cbmem_was_initted = !cbmem_recovery(s3resume);
+	const int cbmem_was_initted = !cbmem_recovery(s3resume);
 	if (!s3resume)
 		save_mrc_data(&pei_data);
 
diff --git a/src/northbridge/intel/sandybridge/raminit_native.h b/src/northbridge/intel/sandybridge/raminit_native.h
index ecf13cf..60a5665 100644
--- a/src/northbridge/intel/sandybridge/raminit_native.h
+++ b/src/northbridge/intel/sandybridge/raminit_native.h
@@ -18,8 +18,8 @@
 #include "sandybridge.h"
 #include <device/dram/ddr3.h>
 
-/* The order is ch0dimmA, ch0dimmB, ch1dimmA, ch1dimmB.  */
+/* The order is: ch0dimmA, ch0dimmB, ch1dimmA, ch1dimmB */
 void read_spd(spd_raw_data *spd, u8 addr, bool id_only);
 void mainboard_get_spd(spd_raw_data *spd, bool id_only);
 
-#endif				/* RAMINIT_H */
+#endif /* RAMINIT_NATIVE_H */
diff --git a/src/northbridge/intel/sandybridge/raminit_sandy.c b/src/northbridge/intel/sandybridge/raminit_sandy.c
index b5169e7..3b68c22 100644
--- a/src/northbridge/intel/sandybridge/raminit_sandy.c
+++ b/src/northbridge/intel/sandybridge/raminit_sandy.c
@@ -18,116 +18,105 @@
 #include "raminit_native.h"
 #include "raminit_common.h"
 
-/* Frequency multiplier.  */
+/* Frequency multiplier */
 static u32 get_FRQ(u32 tCK)
 {
-	u32 FRQ;
-	FRQ = 256000 / (tCK * BASEFREQ);
+	const u32 FRQ = 256000 / (tCK * BASEFREQ);
+
 	if (FRQ > 8)
 		return 8;
 	if (FRQ < 3)
 		return 3;
+
 	return FRQ;
 }
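A worked example of the multiplier computation (hypothetical DIMM; this assumes tCK is kept in 1/256 ns units and that BASEFREQ is the 133 MHz reference clock, as the surrounding code suggests):

/*
 * DDR3-1600 clocks the bus at 800 MHz, so tCK = 256000 / 800 = 320 and
 * FRQ = 256000 / (320 * 133) = 6 (integer division), within the [3..8] clamp.
 * A hypothetical DDR3-2400 module (tCK = 213) would yield FRQ = 9, clamped to 8.
 */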
 
+/* Get REFI based on MC frequency */
 static u32 get_REFI(u32 tCK)
 {
-	/* Get REFI based on MCU frequency using the following rule:
-	 *        _________________________________________
-	 * FRQ : | 3    | 4    | 5    | 6    | 7    | 8    |
-	 * REFI: | 3120 | 4160 | 5200 | 6240 | 7280 | 8320 |
-	 */
-	static const u32 frq_refi_map[] =
-	    { 3120, 4160, 5200, 6240, 7280, 8320 };
+	static const u32 frq_refi_map[] = {
+	/* FRQ:    3,    4,    5,    6,    7,    8, */
+		3120, 4160, 5200, 6240, 7280, 8320,
+	};
 	return frq_refi_map[get_FRQ(tCK) - 3];
 }
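Note on the lookup pattern shared by all helpers below: get_FRQ() is clamped to 3..8, so the "- 3" offset maps it onto indices 0..5 of these six-entry tables.

/*
 * FRQ = 3  ->  frq_refi_map[0] = 3120
 * FRQ = 8  ->  frq_refi_map[5] = 8320
 */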
 
+/* Get XSOffset based on MC frequency */
 static u8 get_XSOffset(u32 tCK)
 {
-	/* Get XSOffset based on MCU frequency using the following rule:
-	 *             _________________________
-	 * FRQ      : | 3 | 4 | 5 | 6 | 7  | 8  |
-	 * XSOffset : | 4 | 6 | 7 | 8 | 10 | 11 |
-	 */
-	static const u8 frq_xs_map[] = { 4, 6, 7, 8, 10, 11 };
+	static const u8 frq_xs_map[] = {
+	/* FRQ: 3,  4,  5,  6,  7,  8, */
+		4,  6,  7,  8, 10, 11,
+	};
 	return frq_xs_map[get_FRQ(tCK) - 3];
 }
 
+/* Get MOD based on MC frequency */
 static u8 get_MOD(u32 tCK)
 {
-	/* Get MOD based on MCU frequency using the following rule:
-	 *        _____________________________
-	 * FRQ : | 3  | 4  | 5  | 6  | 7  | 8  |
-	 * MOD : | 12 | 12 | 12 | 12 | 15 | 16 |
-	 */
-	static const u8 frq_mod_map[] = { 12, 12, 12, 12, 15, 16 };
+	static const u8 frq_mod_map[] = {
+	/* FRQ:  3,  4,  5,  6,  7,  8, */
+		12, 12, 12, 12, 15, 16,
+	};
 	return frq_mod_map[get_FRQ(tCK) - 3];
 }
 
+/* Get Write Leveling Output delay based on MC frequency */
 static u8 get_WLO(u32 tCK)
 {
-	/* Get WLO based on MCU frequency using the following rule:
-	 *        _______________________
-	 * FRQ : | 3 | 4 | 5 | 6 | 7 | 8 |
-	 * WLO : | 4 | 5 | 6 | 6 | 8 | 8 |
-	 */
-	static const u8 frq_wlo_map[] = { 4, 5, 6, 6, 8, 8 };
+	static const u8 frq_wlo_map[] = {
+	/* FRQ: 3, 4, 5, 6, 7, 8, */
+		4, 5, 6, 6, 8, 8,
+	};
 	return frq_wlo_map[get_FRQ(tCK) - 3];
 }
 
+/* Get CKE based on MC frequency */
 static u8 get_CKE(u32 tCK)
 {
-	/* Get CKE based on MCU frequency using the following rule:
-	 *        _______________________
-	 * FRQ : | 3 | 4 | 5 | 6 | 7 | 8 |
-	 * CKE : | 3 | 3 | 4 | 4 | 5 | 6 |
-	 */
-	static const u8 frq_cke_map[] = { 3, 3, 4, 4, 5, 6 };
+	static const u8 frq_cke_map[] = {
+	/* FRQ: 3, 4, 5, 6, 7, 8, */
+		3, 3, 4, 4, 5, 6,
+	};
 	return frq_cke_map[get_FRQ(tCK) - 3];
 }
 
+/* Get XPDLL based on MC frequency */
 static u8 get_XPDLL(u32 tCK)
 {
-	/* Get XPDLL based on MCU frequency using the following rule:
-	 *          _____________________________
-	 * FRQ   : | 3  | 4  | 5  | 6  | 7  | 8  |
-	 * XPDLL : | 10 | 13 | 16 | 20 | 23 | 26 |
-	 */
-	static const u8 frq_xpdll_map[] = { 10, 13, 16, 20, 23, 26 };
+	static const u8 frq_xpdll_map[] = {
+	/* FRQ:  3,  4,  5,  6,  7,  8, */
+		10, 13, 16, 20, 23, 26,
+	};
 	return frq_xpdll_map[get_FRQ(tCK) - 3];
 }
 
+/* Get XP based on MC frequency */
 static u8 get_XP(u32 tCK)
 {
-	/* Get XP based on MCU frequency using the following rule:
-	 *        _______________________
-	 * FRQ : | 3 | 4 | 5 | 6 | 7 | 8 |
-	 * XP  : | 3 | 4 | 4 | 5 | 6 | 7 |
-	 */
-	static const u8 frq_xp_map[] = { 3, 4, 4, 5, 6, 7 };
+	static const u8 frq_xp_map[] = {
+	/* FRQ: 3, 4, 5, 6, 7, 8, */
+		3, 4, 4, 5, 6, 7,
+	};
 	return frq_xp_map[get_FRQ(tCK) - 3];
 }
 
+/* Get AONPD based on MC frequency */
 static u8 get_AONPD(u32 tCK)
 {
-	/* Get AONPD based on MCU frequency using the following rule:
-	 *          ________________________
-	 * FRQ   : | 3 | 4 | 5 | 6 | 7 | 8  |
-	 * AONPD : | 4 | 5 | 6 | 8 | 8 | 10 |
-	 */
-	static const u8 frq_aonpd_map[] = { 4, 5, 6, 8, 8, 10 };
+	static const u8 frq_aonpd_map[] = {
+	/* FRQ: 3,  4,  5,  6,  7,  8, */
+		4,  5,  6,  8,  8, 10,
+	};
 	return frq_aonpd_map[get_FRQ(tCK) - 3];
 }
 
+/* Get COMP2 based on MC frequency */
 static u32 get_COMP2(u32 tCK)
 {
-	/* Get COMP2 based on MCU frequency using the following rule:
-	 *         ___________________________________________________________
-	 * FRQ  : | 3       | 4       | 5       | 6       | 7       | 8       |
-	 * COMP : | D6BEDCC | CE7C34C | CA57A4C | C6369CC | C42514C | C21410C |
-	 */
-	static const u32 frq_comp2_map[] = { 0xD6BEDCC, 0xCE7C34C, 0xCA57A4C,
-		0xC6369CC, 0xC42514C, 0xC21410C
+	static const u32 frq_comp2_map[] = {
+	/* FRQ:          3,          4,          5,          6,          7,          8, */
+		0x0D6BEDCC, 0x0CE7C34C, 0x0CA57A4C, 0x0C6369CC, 0x0C42514C, 0x0C21410C,
 	};
 	return frq_comp2_map[get_FRQ(tCK) - 3];
 }
@@ -154,21 +143,23 @@
 static void find_cas_tck(ramctr_timing *ctrl)
 {
 	u8 val;
-	u32 val32;
 
 	/* Find CAS latency */
 	while (1) {
-		/* Normalising tCK before computing clock could potentially
-		 * results in lower selected CAS, which is desired.
+		/*
+		 * Normalising tCK before computing clock could potentially
+		 * result in a lower selected CAS, which is desired.
 		 */
 		snb_normalize_tclk(&(ctrl->tCK));
 		if (!(ctrl->tCK))
 			die("Couldn't find compatible clock / CAS settings\n");
+
 		val = DIV_ROUND_UP(ctrl->tAA, ctrl->tCK);
 		printk(BIOS_DEBUG, "Trying CAS %u, tCK %u.\n", val, ctrl->tCK);
 		for (; val <= MAX_CAS; val++)
 			if ((ctrl->cas_supported >> (val - MIN_CAS)) & 1)
 				break;
+
 		if (val == (MAX_CAS + 1)) {
 			ctrl->tCK++;
 			continue;
@@ -178,18 +169,17 @@
 		}
 	}
 
-	val32 = NS2MHZ_DIV256 / ctrl->tCK;
-	printk(BIOS_DEBUG, "Selected DRAM frequency: %u MHz\n", val32);
-
+	printk(BIOS_DEBUG, "Selected DRAM frequency: %u MHz\n", NS2MHZ_DIV256 / ctrl->tCK);
 	printk(BIOS_DEBUG, "Selected CAS latency   : %uT\n", val);
 	ctrl->CAS = val;
 }
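For example (hypothetical DDR3-1600 CL11 module, with times in 1/256 ns units as assumed above):

/*
 * tAA = 13.75 ns -> 3520,  tCK = 320
 * val = DIV_ROUND_UP(3520, 320) = 11
 * If bit (11 - MIN_CAS) of cas_supported is clear, the loop tries 12, 13, ...
 * up to MAX_CAS; if none fits, tCK is incremented (lowering the target
 * frequency) and the search starts over.
 */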
 
 static void dram_timing(ramctr_timing *ctrl)
 {
-	/* Maximum supported DDR3 frequency is 1066MHz (DDR3 2133) so make sure
-	 * we cap it if we have faster DIMMs.
-	 * Then, align it to the closest JEDEC standard frequency */
+	/*
+	 * On Sandy Bridge, the maximum supported DDR3 frequency is 1066MHz (DDR3 2133).
+	 * Cap it for faster DIMMs, and align it to the closest JEDEC standard frequency.
+	 */
 	if (ctrl->tCK == TCK_1066MHZ) {
 		ctrl->edge_offset[0] = 16;
 		ctrl->edge_offset[1] = 7;
@@ -198,6 +188,7 @@
 		ctrl->timC_offset[1] = 7;
 		ctrl->timC_offset[2] = 7;
 		ctrl->pi_coding_threshold = 13;
+
 	} else if (ctrl->tCK == TCK_933MHZ) {
 		ctrl->edge_offset[0] = 14;
 		ctrl->edge_offset[1] = 6;
@@ -206,6 +197,7 @@
 		ctrl->timC_offset[1] = 6;
 		ctrl->timC_offset[2] = 6;
 		ctrl->pi_coding_threshold = 15;
+
 	} else if (ctrl->tCK == TCK_800MHZ) {
 		ctrl->edge_offset[0] = 13;
 		ctrl->edge_offset[1] = 5;
@@ -214,6 +206,7 @@
 		ctrl->timC_offset[1] = 5;
 		ctrl->timC_offset[2] = 5;
 		ctrl->pi_coding_threshold = 15;
+
 	} else if (ctrl->tCK == TCK_666MHZ) {
 		ctrl->edge_offset[0] = 10;
 		ctrl->edge_offset[1] = 4;
@@ -222,6 +215,7 @@
 		ctrl->timC_offset[1] = 4;
 		ctrl->timC_offset[2] = 4;
 		ctrl->pi_coding_threshold = 16;
+
 	} else if (ctrl->tCK == TCK_533MHZ) {
 		ctrl->edge_offset[0] = 8;
 		ctrl->edge_offset[1] = 3;
@@ -230,6 +224,7 @@
 		ctrl->timC_offset[1] = 3;
 		ctrl->timC_offset[2] = 3;
 		ctrl->pi_coding_threshold = 17;
+
 	} else  {
 		ctrl->tCK = TCK_400MHZ;
 		ctrl->edge_offset[0] = 6;
@@ -251,13 +246,14 @@
 		ctrl->CWL = DIV_ROUND_UP(ctrl->tCWL, ctrl->tCK);
 	else
 		ctrl->CWL = get_CWL(ctrl->tCK);
+
 	printk(BIOS_DEBUG, "Selected CWL latency   : %uT\n", ctrl->CWL);
 
 	/* Find tRCD */
 	ctrl->tRCD = DIV_ROUND_UP(ctrl->tRCD, ctrl->tCK);
 	printk(BIOS_DEBUG, "Selected tRCD          : %uT\n", ctrl->tRCD);
 
-	ctrl->tRP = DIV_ROUND_UP(ctrl->tRP, ctrl->tCK);
+	ctrl->tRP  = DIV_ROUND_UP(ctrl->tRP,  ctrl->tCK);
 	printk(BIOS_DEBUG, "Selected tRP           : %uT\n", ctrl->tRP);
 
 	/* Find tRAS */
@@ -265,7 +261,7 @@
 	printk(BIOS_DEBUG, "Selected tRAS          : %uT\n", ctrl->tRAS);
 
 	/* Find tWR */
-	ctrl->tWR = DIV_ROUND_UP(ctrl->tWR, ctrl->tCK);
+	ctrl->tWR  = DIV_ROUND_UP(ctrl->tWR,  ctrl->tCK);
 	printk(BIOS_DEBUG, "Selected tWR           : %uT\n", ctrl->tWR);
 
 	/* Find tFAW */
@@ -285,25 +281,25 @@
 	printk(BIOS_DEBUG, "Selected tWTR          : %uT\n", ctrl->tWTR);
 
 	/* Refresh-to-Active or Refresh-to-Refresh (tRFC) */
-	ctrl->tRFC = DIV_ROUND_UP(ctrl->tRFC, ctrl->tCK - 1);
+	ctrl->tRFC = DIV_ROUND_UP(ctrl->tRFC, ctrl->tCK - 1);	/* FIXME: Why the -1 ? */
 	printk(BIOS_DEBUG, "Selected tRFC          : %uT\n", ctrl->tRFC);
 
-	ctrl->tREFI = get_REFI(ctrl->tCK);
-	ctrl->tMOD = get_MOD(ctrl->tCK);
+	ctrl->tREFI     =     get_REFI(ctrl->tCK);
+	ctrl->tMOD      =      get_MOD(ctrl->tCK);
 	ctrl->tXSOffset = get_XSOffset(ctrl->tCK);
-	ctrl->tWLO = get_WLO(ctrl->tCK);
-	ctrl->tCKE = get_CKE(ctrl->tCK);
-	ctrl->tXPDLL = get_XPDLL(ctrl->tCK);
-	ctrl->tXP = get_XP(ctrl->tCK);
-	ctrl->tAONPD = get_AONPD(ctrl->tCK);
+	ctrl->tWLO      =      get_WLO(ctrl->tCK);
+	ctrl->tCKE      =      get_CKE(ctrl->tCK);
+	ctrl->tXPDLL    =    get_XPDLL(ctrl->tCK);
+	ctrl->tXP       =       get_XP(ctrl->tCK);
+	ctrl->tAONPD    =    get_AONPD(ctrl->tCK);
 }
 
 static void dram_freq(ramctr_timing *ctrl)
 {
-
 	if (ctrl->tCK > TCK_400MHZ) {
-		printk(BIOS_ERR, "DRAM frequency is under lowest supported "
-			"frequency (400 MHz). Increasing to 400 MHz as last resort");
+		printk(BIOS_ERR,
+			"DRAM frequency is under lowest supported frequency (400 MHz). "
+			"Increasing to 400 MHz as last resort");
 		ctrl->tCK = TCK_400MHZ;
 	}
 
@@ -311,13 +307,15 @@
 		u8 val2;
 		u32 reg1 = 0;
 
+		/* Step 1 - Set target PCU frequency */
 		find_cas_tck(ctrl);
 
-		/* Frequency multiplier.  */
-		u32 FRQ = get_FRQ(ctrl->tCK);
+		/* Frequency multiplier */
+		const u32 FRQ = get_FRQ(ctrl->tCK);
 
-		/* The PLL will never lock if the required frequency is
-		 * already set. Exit early to prevent a system hang.
+		/*
+		 * The PLL will never lock if the required frequency is already set.
+		 * Exit early to prevent a system hang.
 		 */
 		reg1 = MCHBAR32(MC_BIOS_DATA);
 		val2 = (u8) reg1;
@@ -326,7 +324,7 @@
 
 		/* Step 1 - Select frequency in the MCU */
 		reg1 = FRQ;
-		reg1 |= 0x80000000;	// set running bit
+		reg1 |= 0x80000000;	/* set running bit */
 		MCHBAR32(MC_BIOS_REQ) = reg1;
 		int i=0;
 		printk(BIOS_DEBUG, "PLL busy... ");
@@ -352,61 +350,57 @@
 
 static void dram_ioregs(ramctr_timing *ctrl)
 {
-	u32 reg, comp2;
+	u32 reg;
 
 	int channel;
 
-	// IO clock
+	/* IO clock */
 	FOR_ALL_CHANNELS {
 		MCHBAR32(GDCRCLKRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
 	}
 
-	// IO command
+	/* IO command */
 	FOR_ALL_CHANNELS {
 		MCHBAR32(GDCRCTLRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
 	}
 
-	// IO control
+	/* IO control */
 	FOR_ALL_POPULATED_CHANNELS {
 		program_timings(ctrl, channel);
 	}
 
-	// Rcomp
+	/* Perform RCOMP */
 	printram("RCOMP...");
-	reg = 0;
-	while (reg == 0) {
-		reg = MCHBAR32(RCOMP_TIMER) & 0x10000;
-	}
+	while (!(MCHBAR32(RCOMP_TIMER) & (1 << 16)))
+		;
+
 	printram("done\n");
 
-	// Set comp2
-	comp2 = get_COMP2(ctrl->tCK);
-	MCHBAR32(CRCOMPOFST2) = comp2;
+	/* Set COMP2 */
+	MCHBAR32(CRCOMPOFST2) = get_COMP2(ctrl->tCK);
 	printram("COMP2 done\n");
 
-	// Set comp1
+	/* Set COMP1 */
 	FOR_ALL_POPULATED_CHANNELS {
-		reg = MCHBAR32(CRCOMPOFST1_ch(channel));	//ch0
-		reg = (reg & ~0xe00) | (1 << 9);	//odt
-		reg = (reg & ~0xe00000) | (1 << 21);	//clk drive up
-		reg = (reg & ~0x38000000) | (1 << 27);	//ctl drive up
+		reg = MCHBAR32(CRCOMPOFST1_ch(channel));
+		reg = (reg & ~0x00000e00) | (1 <<  9);	/* ODT */
+		reg = (reg & ~0x00e00000) | (1 << 21);	/* clk drive up */
+		reg = (reg & ~0x38000000) | (1 << 27);	/* ctl drive up */
 		MCHBAR32(CRCOMPOFST1_ch(channel)) = reg;
 	}
 	printram("COMP1 done\n");
 
 	printram("FORCE RCOMP and wait 20us...");
-	MCHBAR32(M_COMP) |= 0x100;
+	MCHBAR32(M_COMP) |= (1 << 8);
 	udelay(20);
 	printram("done\n");
 }
 
-int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
-		int s3_resume, int me_uma_size)
+int try_init_dram_ddr3_snb(ramctr_timing *ctrl, int fast_boot, int s3_resume, int me_uma_size)
 {
 	int err;
 
-	printk(BIOS_DEBUG, "Starting SandyBridge RAM training (%d).\n",
-		   fast_boot);
+	printk(BIOS_DEBUG, "Starting SandyBridge RAM training (%d).\n", fast_boot);
 
 	if (!fast_boot) {
 		/* Find fastest common supported parameters */
@@ -415,7 +409,7 @@
 		dram_dimm_mapping(ctrl);
 	}
 
-	/* Set MCU frequency */
+	/* Set MC frequency */
 	dram_freq(ctrl);
 
 	if (!fast_boot) {
@@ -424,7 +418,7 @@
 	}
 
 	/* Set version register */
-	MCHBAR32(MRC_REVISION) = 0xC04EB002;
+	MCHBAR32(MRC_REVISION) = 0xc04eb002;
 
 	/* Enable crossover */
 	dram_xover(ctrl);
@@ -438,11 +432,11 @@
 	/* Set scheduler chicken bits */
 	MCHBAR32(SCHED_CBIT) = 0x10100005;
 
-	/* Set CPU specific register */
-	set_4f8c();
+	/* Set up watermarks and starvation counter */
+	set_wmm_behavior();
 
 	/* Clear IO reset bit */
-	MCHBAR32(MC_INIT_STATE_G) &= ~0x20;
+	MCHBAR32(MC_INIT_STATE_G) &= ~(1 << 5);
 
 	/* Set MAD-DIMM registers */
 	dram_dimm_set_mapping(ctrl);
@@ -464,7 +458,7 @@
 	if (fast_boot) {
 		restore_timings(ctrl);
 	} else {
-		/* Do jedec ddr3 reset sequence */
+		/* Do JEDEC DDR3 reset sequence */
 		dram_jedecreset(ctrl);
 		printk(BIOS_DEBUG, "Done jedec reset\n");
 
@@ -508,7 +502,7 @@
 		normalize_training(ctrl);
 	}
 
-	set_4008c(ctrl);
+	set_read_write_timings(ctrl);
 
 	write_controller_mr(ctrl);
 
diff --git a/src/northbridge/intel/sandybridge/romstage.c b/src/northbridge/intel/sandybridge/romstage.c
index 81049e5..ec44ee2 100644
--- a/src/northbridge/intel/sandybridge/romstage.c
+++ b/src/northbridge/intel/sandybridge/romstage.c
@@ -39,20 +39,18 @@
 {
 	u8 reg8;
 
-	// reset rtc power status
+	/* Reset RTC power status */
 	reg8 = pci_read_config8(PCH_LPC_DEV, GEN_PMCON_3);
 	reg8 &= ~(1 << 2);
 	pci_write_config8(PCH_LPC_DEV, GEN_PMCON_3, reg8);
 }
 
-/* Platform has no romstage entry point under mainboard directory,
- * so this one is named with prefix mainboard.
- */
+/* The romstage entry point for this platform is not mainboard-specific, hence the name */
 void mainboard_romstage_entry(void)
 {
 	int s3resume = 0;
 
-	if (MCHBAR16(SSKPD) == 0xCAFE)
+	if (MCHBAR16(SSKPD_HI) == 0xCAFE)
 		system_reset();
 
 	enable_lapic();
@@ -60,14 +58,12 @@
 	/* Init LPC, GPIO, BARs, disable watchdog ... */
 	early_pch_init();
 
-	/* USB is initialized in MRC if MRC is used.  */
+	/* When using MRC, USB is initialized by MRC */
 	if (CONFIG(USE_NATIVE_RAMINIT)) {
 		early_usb_init(mainboard_usb_ports);
 	}
 
-	/* Perform some early chipset initialization required
-	 * before RAM initialization can work
-	 */
+	/* Perform some early chipset init needed before RAM initialization can work */
 	systemagent_early_init();
 	printk(BIOS_DEBUG, "Back from systemagent_early_init()\n");
 
diff --git a/src/northbridge/intel/sandybridge/sandybridge.h b/src/northbridge/intel/sandybridge/sandybridge.h
index 0bbb6fc..07d7904 100644
--- a/src/northbridge/intel/sandybridge/sandybridge.h
+++ b/src/northbridge/intel/sandybridge/sandybridge.h
@@ -43,8 +43,8 @@
 #define DEFAULT_EPBAR		0xfed19000	/* 4 KB */
 #define DEFAULT_RCBABASE	((u8 *)0xfed1c000)
 
-#define IOMMU_BASE1		0xfed90000ULL
-#define IOMMU_BASE2		0xfed91000ULL
+#define GFXVT_BASE		0xfed90000ULL
+#define VTVC0_BASE		0xfed91000ULL
 
 /* Everything below this line is ignored in the DSDT */
 #ifndef __ACPI__
@@ -58,31 +58,32 @@
 
 
 /* Device 0:0.0 PCI configuration space (Host Bridge) */
+#define HOST_BRIDGE	PCI_DEV(0, 0, 0)
 
 #define EPBAR		0x40
 #define MCHBAR		0x48
-#define PCIEXBAR	0x60
-#define DMIBAR		0x68
 
-#define GGC		0x50			/* GMCH Graphics Control */
-
-#define DEVEN		0x54			/* Device Enable */
+#define GGC		0x50	/* GMCH Graphics Control */
+#define DEVEN		0x54	/* Device Enable */
 #define  DEVEN_D7EN	(1 << 14)
 #define  DEVEN_PEG60	(1 << 13)
-#define  DEVEN_D4EN	(1 << 7)
-#define  DEVEN_IGD	(1 << 4)
-#define  DEVEN_PEG10	(1 << 3)
-#define  DEVEN_PEG11	(1 << 2)
-#define  DEVEN_PEG12	(1 << 1)
-#define  DEVEN_HOST	(1 << 0)
+#define  DEVEN_D4EN	(1 <<  7)
+#define  DEVEN_IGD	(1 <<  4)
+#define  DEVEN_PEG10	(1 <<  3)
+#define  DEVEN_PEG11	(1 <<  2)
+#define  DEVEN_PEG12	(1 <<  1)
+#define  DEVEN_HOST	(1 <<  0)
 
 #define PAVPC		0x58	/* Protected Audio Video Path Control */
 #define DPR		0x5c	/* DMA Protected Range */
 
+#define PCIEXBAR	0x60
+#define DMIBAR		0x68
+
 #define MESEG_BASE	0x70
 #define MESEG_MASK	0x78
-#define  MELCK		(1 << 10) /* ME Range Lock */
-#define  ME_STLEN_EN	(1 << 11) /* ME Stolen Memory Enable */
+#define  MELCK		(1 << 10)	/* ME Range Lock */
+#define  ME_STLEN_EN	(1 << 11)	/* ME Stolen Memory Enable */
 
 #define PAM0		0x80
 #define PAM1		0x81
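A usage sketch for the HOST_BRIDGE macro added above (illustrative only, assuming early-boot code where pci_read_config32() takes a pci_devfn_t):

	/* Illustrative fragment: read DEVEN from the host bridge and test the IGD bit */
	const u32 deven = pci_read_config32(HOST_BRIDGE, DEVEN);
	if (deven & DEVEN_IGD)
		printk(BIOS_DEBUG, "Internal graphics is enabled\n");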
@@ -109,6 +110,13 @@
 
 #define SKPAD		0xdc	/* Scratchpad Data */
 
+#define DIDOR		0xf3	/* Device ID override, for debug and samples only */
+
+
+/* Devices 0:1.0, 0:1.1, 0:1.2, 0:6.0 PCI configuration space (PCI Express Graphics) */
+
+#define AFE_PWRON	0xc24	/* PEG Analog Front-End Power-On */
+
 
 /* Device 0:2.0 PCI configuration space (Graphics Device) */
 
@@ -118,246 +126,27 @@
  * MCHBAR
  */
 
-#define MCHBAR8(x) (*((volatile u8 *)(DEFAULT_MCHBAR + (x))))
+#define MCHBAR8(x)  (*((volatile u8  *)(DEFAULT_MCHBAR + (x))))
 #define MCHBAR16(x) (*((volatile u16 *)(DEFAULT_MCHBAR + (x))))
 #define MCHBAR32(x) (*((volatile u32 *)(DEFAULT_MCHBAR + (x))))
-#define MCHBAR32_OR(x, or) (MCHBAR32(x) = (MCHBAR32(x) | (or)))
-#define MCHBAR32_AND(x, and) (MCHBAR32(x) = (MCHBAR32(x) & (and)))
+#define MCHBAR8_AND(x,  and) (MCHBAR8(x)  = MCHBAR8(x)  & (and))
+#define MCHBAR16_AND(x, and) (MCHBAR16(x) = MCHBAR16(x) & (and))
+#define MCHBAR32_AND(x, and) (MCHBAR32(x) = MCHBAR32(x) & (and))
+#define MCHBAR8_OR(x,  or) (MCHBAR8(x)  = MCHBAR8(x)  | (or))
+#define MCHBAR16_OR(x, or) (MCHBAR16(x) = MCHBAR16(x) | (or))
+#define MCHBAR32_OR(x, or) (MCHBAR32(x) = MCHBAR32(x) | (or))
+#define MCHBAR8_AND_OR(x,  and, or) (MCHBAR8(x)  = (MCHBAR8(x)  & (and)) | (or))
+#define MCHBAR16_AND_OR(x, and, or) (MCHBAR16(x) = (MCHBAR16(x) & (and)) | (or))
 #define MCHBAR32_AND_OR(x, and, or) (MCHBAR32(x) = (MCHBAR32(x) & (and)) | (or))
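The added width-specific helpers shorten the read-modify-write pattern used throughout raminit. As a hedged sketch (not part of this patch), the COMP1 ODT update in dram_ioregs() above could be expressed as:

	/* Illustrative only: clear bits 11:9 of CRCOMPOFST1 and set bit 9 in one statement */
	MCHBAR32_AND_OR(CRCOMPOFST1_ch(channel), ~0x00000e00, 1 << 9);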
 
-/* Indexed register helper macros */
-#define Gz(r, z)	((r) + ((z) <<  8))
-#define Ly(r, y)	((r) + ((y) <<  2))
-#define Cx(r, x)	((r) + ((x) << 10))
-#define CxLy(r, x, y)	((r) + ((x) << 10) + ((y) << 2))
-#define GzLy(r, z, y)	((r) + ((z) <<  8) + ((y) << 2))
-
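These helpers (now moved to mchbar_regs.h) encode the channel, lane and rank strides of the MCHBAR register file. Example addresses derived from the shifts above (operands chosen for illustration):

/*
 * Cx(0x4000, 1)      = 0x4000 + (1 << 10)            = 0x4400
 * CxLy(0x4040, 1, 2) = 0x4040 + (1 << 10) + (2 << 2) = 0x4448
 * GzLy(0x0010, 0, 3) = 0x0010 + (0 <<  8) + (3 << 2) = 0x001c
 */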
-/* byte lane training register base addresses */
-#define LANEBASE_B0	0x0000
-#define LANEBASE_B1	0x0200
-#define LANEBASE_B2	0x0400
-#define LANEBASE_B3	0x0600
-#define LANEBASE_ECC	0x0800 /* ECC lane is in the middle of the data lanes */
-#define LANEBASE_B4	0x1000
-#define LANEBASE_B5	0x1200
-#define LANEBASE_B6	0x1400
-#define LANEBASE_B7	0x1600
-
-/* byte lane register offsets */
-#define GDCRTRAININGRESULT(ch, y)	GzLy(0x0004, ch, y) /* Test results for PI config */
-#define GDCRTRAININGRESULT1(ch)		GDCRTRAININGRESULT(ch, 0) /* 0x0004 */
-#define GDCRTRAININGRESULT2(ch)		GDCRTRAININGRESULT(ch, 1) /* 0x0008 */
-#define GDCRRX(ch, rank)		GzLy(0x10, ch, rank) /* Time setting for lane Rx */
-#define GDCRTX(ch, rank)		GzLy(0x20, ch, rank) /* Time setting for lane Tx */
-
-/* Register definitions */
-#define GDCRCLKRANKSUSED_ch(ch)		Gz(0x0c00, ch) /* Indicates which rank is populated */
-#define GDCRCLKCOMP_ch(ch)		Gz(0x0c04, ch) /* RCOMP result register */
-#define GDCRCKPICODE_ch(ch)		Gz(0x0c14, ch) /* PI coding for DDR CLK pins */
-#define GDCRCKLOGICDELAY_ch(ch)		Gz(0x0c18, ch) /* Logic delay of 1 QCLK in CLK slice */
-#define GDDLLFUSE_ch(ch)		Gz(0x0c20, ch) /* Used for fuse download to the DLLs */
-#define GDCRCLKDEBUGMUXCFG_ch(ch)	Gz(0x0c3c, ch) /* Debug MUX control */
-
-#define GDCRCMDDEBUGMUXCFG_Cz_S(ch)	Gz(0x0e3c, ch) /* Debug MUX control */
-
-#define CRCOMPOFST1_ch(ch)		Gz(0x1810, ch) /* DQ, CTL and CLK Offset values */
-
-#define GDCRTRAININGMOD_ch(ch)		Gz(0x3000, ch) /* Data training mode control */
-#define GDCRTRAININGRESULT1_ch(ch)	Gz(0x3004, ch) /* Training results according to PI */
-#define GDCRTRAININGRESULT2_ch(ch)	Gz(0x3008, ch)
-
-#define GDCRCTLRANKSUSED_ch(ch)		Gz(0x3200, ch) /* Indicates which rank is populated */
-#define GDCRCMDCOMP_ch(ch)		Gz(0x3204, ch) /* COMP values register */
-#define GDCRCMDCTLCOMP_ch(ch)		Gz(0x3208, ch) /* COMP values register */
-#define GDCRCMDPICODING_ch(ch)		Gz(0x320c, ch) /* Command and control PI coding */
-
-#define GDCRTRAININGMOD			0x3400 /* Data training mode control register */
-#define GDCRDATACOMP			0x340c /* COMP values register */
-
-#define CRCOMPOFST2			0x3714 /* CMD DRV, SComp and Static Leg controls */
-
-/* MC per-channel registers */
-#define TC_DBP_ch(ch)			Cx(0x4000, ch) /* Timings: BIN */
-#define TC_RAP_ch(ch)			Cx(0x4004, ch) /* Timings: Regular access */
-#define TC_RWP_ch(ch)			Cx(0x4008, ch) /* Timings: Read / Write */
-#define TC_OTHP_ch(ch)			Cx(0x400c, ch) /* Timings: Other parameters */
-#define SCHED_SECOND_CBIT_ch(ch)	Cx(0x401c, ch) /* More chicken bits */
-#define SCHED_CBIT_ch(ch)		Cx(0x4020, ch) /* Chicken bits in scheduler */
-#define SC_ROUNDT_LAT_ch(ch)		Cx(0x4024, ch) /* Round-trip latency per rank */
-#define SC_IO_LATENCY_ch(ch)		Cx(0x4028, ch) /* IO Latency Configuration */
-#define SCRAMBLING_SEED_1_ch(ch)	Cx(0x4034, ch) /* Scrambling seed 1 */
-#define SCRAMBLING_SEED_2_LOW_ch(ch)	Cx(0x4038, ch) /* Scrambling seed 2 low */
-#define SCRAMBLING_SEED_2_HIGH_ch(ch)	Cx(0x403c, ch) /* Scrambling seed 2 high */
-
-/* IOSAV Bytelane Bit-wise error */
-#define IOSAV_By_BW_SERROR_ch(ch, y)	CxLy(0x4040, ch, y)
-
-/* IOSAV Bytelane Bit-wise compare mask */
-#define IOSAV_By_BW_MASK_ch(ch, y)	CxLy(0x4080, ch, y)
-
-/*
- * Defines the number of transactions (non-VC1 RD CAS commands) between two priority ticks.
- * Different counters for transactions that are issued on the ring agents (core or GT) and
- * transactions issued in the SA.
- */
-#define SC_PR_CNT_CONFIG_ch(ch)	Cx(0x40a8, ch)
-#define SC_PCIT_ch(ch)		Cx(0x40ac, ch) /* Page-close idle timer setup - 8 bits */
-#define PM_PDWN_CONFIG_ch(ch)	Cx(0x40b0, ch) /* Power-down (CKE-off) operation config */
-#define ECC_INJECT_COUNT_ch(ch)	Cx(0x40b4, ch) /* ECC error injection count */
-#define ECC_DFT_ch(ch)		Cx(0x40b8, ch) /* ECC DFT features (ECC4ANA, error inject) */
-#define SC_WR_ADD_DELAY_ch(ch)	Cx(0x40d0, ch) /* Extra WR delay to overcome WR-flyby issue */
-
-#define IOSAV_By_BW_SERROR_C_ch(ch, y)	CxLy(0x4140, ch, y) /* IOSAV Bytelane Bit-wise error */
-
-/* IOSAV sub-sequence control registers */
-#define IOSAV_n_SP_CMD_ADDR_ch(ch, y)	CxLy(0x4200, ch, y) /* Special command address. */
-#define IOSAV_n_ADDR_UPD_ch(ch, y)	CxLy(0x4210, ch, y) /* Address update control */
-#define IOSAV_n_SP_CMD_CTL_ch(ch, y)	CxLy(0x4220, ch, y) /* Control of command signals */
-#define IOSAV_n_SUBSEQ_CTL_ch(ch, y)	CxLy(0x4230, ch, y) /* Sub-sequence controls */
-#define IOSAV_n_ADDRESS_LFSR_ch(ch, y)	CxLy(0x4240, ch, y) /* 23-bit LFSR state value */
-
-#define PM_THML_STAT_ch(ch)	Cx(0x4280, ch) /* Thermal status of each rank */
-#define IOSAV_SEQ_CTL_ch(ch)	Cx(0x4284, ch) /* IOSAV sequence level control */
-#define IOSAV_DATA_CTL_ch(ch)	Cx(0x4288, ch) /* Data control in IOSAV mode */
-#define IOSAV_STATUS_ch(ch)	Cx(0x428c, ch) /* State of the IOSAV sequence machine */
-#define TC_ZQCAL_ch(ch)		Cx(0x4290, ch) /* ZQCAL control register */
-#define TC_RFP_ch(ch)		Cx(0x4294, ch) /* Refresh Parameters */
-#define TC_RFTP_ch(ch)		Cx(0x4298, ch) /* Refresh Timing Parameters */
-#define TC_MR2_SHADOW_ch(ch)	Cx(0x429c, ch) /* MR2 shadow - copy of DDR configuration */
-#define MC_INIT_STATE_ch(ch)	Cx(0x42a0, ch) /* IOSAV mode control */
-#define TC_SRFTP_ch(ch)		Cx(0x42a4, ch) /* Self-refresh timing parameters */
-#define IOSAV_ERROR_ch(ch)	Cx(0x42ac, ch) /* Data vector count of the first error */
-#define IOSAV_DC_MASK_ch(ch)	Cx(0x42b0, ch) /* IOSAV data check masking */
-
-#define IOSAV_By_ERROR_COUNT_ch(ch, y)	CxLy(0x4340, ch, y) /* Per-byte 16-bit error count */
-#define IOSAV_G_ERROR_COUNT_ch(ch)	Cx(0x4364, ch) /* Global 16-bit error count */
-
-#define PM_TRML_M_CONFIG_ch(ch)		Cx(0x4380, ch) /* Thermal mode configuration */
-#define PM_CMD_PWR_ch(ch)		Cx(0x4384, ch) /* Power contribution of commands */
-#define PM_BW_LIMIT_CONFIG_ch(ch)	Cx(0x4388, ch) /* Bandwidth throttling on overtemp */
-#define SC_WDBWM_ch(ch)			Cx(0x438c, ch) /* Watermarks and starvation counter */
-
-/* MC Channel Broadcast registers */
-#define TC_DBP			0x4c00 /* Timings: BIN */
-#define TC_RAP			0x4c04 /* Timings: Regular access */
-#define TC_RWP			0x4c08 /* Timings: Read / Write */
-#define TC_OTHP			0x4c0c /* Timings: Other parameters */
-#define SCHED_SECOND_CBIT	0x4c1c /* More chicken bits */
-#define SCHED_CBIT		0x4c20 /* Chicken bits in scheduler */
-#define SC_ROUNDT_LAT		0x4c24 /* Round-trip latency per rank */
-#define SC_IO_LATENCY		0x4c28 /* IO Latency Configuration */
-#define SCRAMBLING_SEED_1	0x4c34 /* Scrambling seed 1 */
-#define SCRAMBLING_SEED_2_LOW	0x4c38 /* Scrambling seed 2 low */
-#define SCRAMBLING_SEED_2_HIGH	0x4c3c /* Scrambling seed 2 high */
-
-#define IOSAV_By_BW_SERROR(y)	Ly(0x4c40, y) /* IOSAV Bytelane Bit-wise error */
-#define IOSAV_By_BW_MASK(y)	Ly(0x4c80, y) /* IOSAV Bytelane Bit-wise compare mask */
-
-/*
- * Defines the number of transactions (non-VC1 RD CAS commands) between two priority ticks.
- * Different counters for transactions that are issued on the ring agents (core or GT) and
- * transactions issued in the SA.
- */
-#define SC_PR_CNT_CONFIG	0x4ca8
-#define SC_PCIT			0x4cac /* Page-close idle timer setup - 8 bits */
-#define PM_PDWN_CONFIG		0x4cb0 /* Power-down (CKE-off) operation config */
-#define ECC_INJECT_COUNT	0x4cb4 /* ECC error injection count */
-#define ECC_DFT			0x4cb8 /* ECC DFT features (ECC4ANA, error inject) */
-#define SC_WR_ADD_DELAY		0x4cd0 /* Extra WR delay to overcome WR-flyby issue */
-
-/* Opportunistic reads configuration during write-major-mode (WMM) */
-#define WMM_READ_CONFIG		0x4cd4 /** WARNING: Only exists on IVB! */
-
-#define IOSAV_By_BW_SERROR_C(y)	Ly(0x4d40, y) /* IOSAV Bytelane Bit-wise error */
-
-#define IOSAV_n_SP_CMD_ADDR(n)	Ly(0x4e00, n) /* Sub-sequence special command address */
-#define IOSAV_n_ADDR_UPD(n)	Ly(0x4e10, n) /* Address update after command execution */
-#define IOSAV_n_SP_CMD_CTL(n)	Ly(0x4e20, n) /* Command signals in sub-sequence command */
-#define IOSAV_n_SUBSEQ_CTL(n)	Ly(0x4e30, n) /* Sub-sequence command parameter control */
-#define IOSAV_n_ADDRESS_LFSR(n)	Ly(0x4e40, n) /* 23-bit LFSR value of the sequence */
-
-#define PM_THML_STAT		0x4e80 /* Thermal status of each rank */
-#define IOSAV_SEQ_CTL		0x4e84 /* IOSAV sequence level control */
-#define IOSAV_DATA_CTL		0x4e88 /* Data control in IOSAV mode */
-#define IOSAV_STATUS		0x4e8c /* State of the IOSAV sequence machine */
-#define TC_ZQCAL		0x4e90 /* ZQCAL control register */
-#define TC_RFP			0x4e94 /* Refresh Parameters */
-#define TC_RFTP			0x4e98 /* Refresh Timing Parameters */
-#define TC_MR2_SHADOW		0x4e9c /* MR2 shadow - copy of DDR configuration */
-#define MC_INIT_STATE		0x4ea0 /* IOSAV mode control */
-#define TC_SRFTP		0x4ea4 /* Self-refresh timing parameters */
-
-/*
- * Auxiliary register in mcmnts synthesis FUB (Functional Unit Block). Additionally, this
- * register is also used to enable IOSAV_n_SP_CMD_ADDR optimization on Ivy Bridge.
- */
-#define MCMNTS_SPARE		0x4ea8 /** WARNING: Reserved, use only on IVB! */
-
-#define IOSAV_ERROR		0x4eac /* Data vector count of the first error */
-#define IOSAV_DC_MASK		0x4eb0 /* IOSAV data check masking */
-
-#define IOSAV_By_ERROR_COUNT(y)	Ly(0x4f40, y) /* Per-byte 16-bit error counter */
-#define IOSAV_G_ERROR_COUNT	0x4f64 /* Global 16-bit error counter */
-
-#define PM_TRML_M_CONFIG	0x4f80 /* Thermal mode configuration */
-#define PM_CMD_PWR		0x4f84 /* Power contribution of commands */
-#define PM_BW_LIMIT_CONFIG	0x4f88 /* Bandwidth throttling on overtemperature */
-#define SC_WDBWM		0x4f8c /* Watermarks and starvation counter config */
-
-#define MAD_CHNL		0x5000 /* Address Decoder Channel Configuration */
-#define MAD_DIMM_CH0		0x5004 /* Address Decode Channel 0 */
-#define MAD_DIMM_CH1		0x5008 /* Address Decode Channel 1 */
-#define MAD_DIMM_CH2		0x500c /* Address Decode Channel 2 (unused on SNB) */
-#define MAD_ZR			0x5014 /* Address Decode Zones */
-#define MCDECS_SPARE		0x5018 /* Spare register in mcdecs synthesis FUB */
-#define MCDECS_CBIT		0x501c /* Chicken bits in mcdecs synthesis FUB */
-
-#define CHANNEL_HASH		0x5024 /** WARNING: Only exists on IVB! */
-
-#define MC_INIT_STATE_G		0x5030 /* High-level behavior in IOSAV mode */
-#define MRC_REVISION		0x5034 /* MRC Revision */
-#define PM_DLL_CONFIG		0x5064 /* Memory Controller I/O DLL config */
-#define RCOMP_TIMER		0x5084 /* RCOMP evaluation timer register */
-
-#define MC_LOCK			0x50fc /* Memory Controlller Lock register */
-
-#define VTD1_BASE		0x5400 /* Base address for IGD */
-#define VTD2_BASE		0x5410 /* Base address for PEG, USB, SATA, etc. */
-#define PAIR_CTL		0x5418 /* Power Aware Interrupt Routing Control */
-
-/* PAVP control register, undocumented. Different from PAVPC on PCI config space. */
-#define MMIO_PAVP_CTL		0x5500 /* Bit 0 locks PAVP settings */
-
-#define MEM_TRML_ESTIMATION_CONFIG	0x5880
-#define MEM_TRML_THRESHOLDS_CONFIG	0x5888
-#define MEM_TRML_INTERRUPT		0x58a8
-
-#define MC_TURBO_PL1		0x59a0 /* Turbo Power Limit 1 parameters */
-#define MC_TURBO_PL2		0x59a4 /* Turbo Power Limit 2 parameters */
-
-#define SSKPD_OK		0x5d10 /* 64-bit scratchpad register */
-#define SSKPD			0x5d14 /* 16bit (scratchpad) */
-#define BIOS_RESET_CPL		0x5da8 /* 8bit */
-
-/* PCODE will sample SAPM-related registers at the end of Phase 4. */
-#define MC_BIOS_REQ		0x5e00 /* Memory frequency request register */
-#define MC_BIOS_DATA		0x5e04 /* Miscellaneous information for BIOS */
-#define SAPMCTL			0x5f00 /* Bit 3 enables DDR EPG (C7i) on IVB */
-#define M_COMP			0x5f08 /* Memory COMP control */
-#define SAPMTIMERS		0x5f10 /* SAPM timers in 10ns (100 MHz) units */
-
-/* WARNING: Only applies to Sandy Bridge! */
-#define BANDTIMERS_SNB		0x5f18 /* MPLL and PPLL time to do self-banding */
-
-/** WARNING: Only applies to Ivy Bridge! */
-#define SAPMTIMERS2_IVB		0x5f18 /** Extra latency for DDRIO EPG exit (C7i) */
-#define BANDTIMERS_IVB		0x5f20 /** MPLL and PPLL time to do self-banding */
+/* As there are many registers, define them in a separate file */
+#include "mchbar_regs.h"
 
 /*
  * EPBAR - Egress Port Root Complex Register Block
  */
 
-#define EPBAR8(x) (*((volatile u8 *)(DEFAULT_EPBAR + (x))))
+#define EPBAR8(x)  (*((volatile u8  *)(DEFAULT_EPBAR + (x))))
 #define EPBAR16(x) (*((volatile u16 *)(DEFAULT_EPBAR + (x))))
 #define EPBAR32(x) (*((volatile u32 *)(DEFAULT_EPBAR + (x))))
 
@@ -388,7 +177,7 @@
  * DMIBAR
  */
 
-#define DMIBAR8(x) (*((volatile u8 *)(DEFAULT_DMIBAR + (x))))
+#define DMIBAR8(x)  (*((volatile u8  *)(DEFAULT_DMIBAR + (x))))
 #define DMIBAR16(x) (*((volatile u16 *)(DEFAULT_DMIBAR + (x))))
 #define DMIBAR32(x) (*((volatile u32 *)(DEFAULT_DMIBAR + (x))))
 
@@ -436,7 +225,6 @@
 #ifndef __ASSEMBLER__
 
 void intel_sandybridge_finalize_smm(void);
-
 int bridge_silicon_revision(void);
 void systemagent_early_init(void);
 void sandybridge_init_iommu(void);
@@ -444,8 +232,7 @@
 void northbridge_romstage_finalize(int s3resume);
 void early_init_dmi(void);
 
-/* mainboard_early_init: Optional mainboard callback run after console init
-   but before raminit. */
+/* mainboard_early_init: Optional callback, run after console init but before raminit. */
 void mainboard_early_init(int s3resume);
 int mainboard_should_reset_usb(int s3resume);
 void perform_raminit(int s3resume);
@@ -454,7 +241,8 @@
 #include <device/device.h>
 
 struct acpi_rsdp;
-unsigned long northbridge_write_acpi_tables(struct device *device, unsigned long start, struct acpi_rsdp *rsdp);
+unsigned long northbridge_write_acpi_tables(struct device *device, unsigned long start,
+					    struct acpi_rsdp *rsdp);
 
 #endif
 #endif