/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <console/console.h>
#include <fsp/util.h>
#include <soc/meminit.h>
#include <string.h>

#define LP4X_CH_WIDTH		16
#define LP4X_CHANNELS		CHANNEL_COUNT(LP4X_CH_WIDTH)

#define DDR4_CH_WIDTH		64
#define DDR4_CHANNELS		CHANNEL_COUNT(DDR4_CH_WIDTH)

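/*
 * Per-memory-technology descriptors consumed by the common meminit code. Each entry
 * maps the board's physical memory channels to the logical channel numbering used
 * by the MRC, and describes which physical channels are in use in half-populated
 * and mixed (memory-down + DIMM) topologies.
 */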
static const struct soc_mem_cfg soc_mem_cfg[] = {
	[MEM_TYPE_DDR4] = {
		.num_phys_channels = DDR4_CHANNELS,
		.phys_to_mrc_map = {
			[0] = 0,
			[1] = 4,
		},
		.md_phy_masks = {
			/*
			 * Only physical channel 0 is populated in case of half-populated
			 * configuration.
			 */
			.half_channel = BIT(0),
			/* In mixed topologies, channel 0 is always memory-down. */
			.mixed_topo = BIT(0),
		},
	},
	[MEM_TYPE_LP4X] = {
		.num_phys_channels = LP4X_CHANNELS,
		.phys_to_mrc_map = {
			[0] = 0,
			[1] = 1,
			[2] = 2,
			[3] = 3,
			[4] = 4,
			[5] = 5,
			[6] = 6,
			[7] = 7,
		},
		.md_phy_masks = {
			/*
			 * Physical channels 0, 1, 2 and 3 are populated in case of
			 * half-populated configurations.
			 */
			.half_channel = BIT(0) | BIT(1) | BIT(2) | BIT(3),
			/* LP4x does not support mixed topologies. */
		},
	},
};

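/*
 * Fill the FSP-M SPD pointer UPDs for every MRC channel/DIMM slot. A slot without
 * SPD data (a zero pointer) is marked as disabled in the corresponding DisableDimm
 * UPD so that the MRC skips it.
 */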
static void mem_init_spd_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data)
{
	uint32_t *spd_upds[MRC_CHANNELS][CONFIG_DIMMS_PER_CHANNEL] = {
		[0] = { &mem_cfg->MemorySpdPtr000, &mem_cfg->MemorySpdPtr001, },
		[1] = { &mem_cfg->MemorySpdPtr010, &mem_cfg->MemorySpdPtr011, },
		[2] = { &mem_cfg->MemorySpdPtr020, &mem_cfg->MemorySpdPtr021, },
		[3] = { &mem_cfg->MemorySpdPtr030, &mem_cfg->MemorySpdPtr031, },
		[4] = { &mem_cfg->MemorySpdPtr100, &mem_cfg->MemorySpdPtr101, },
		[5] = { &mem_cfg->MemorySpdPtr110, &mem_cfg->MemorySpdPtr111, },
		[6] = { &mem_cfg->MemorySpdPtr120, &mem_cfg->MemorySpdPtr121, },
		[7] = { &mem_cfg->MemorySpdPtr130, &mem_cfg->MemorySpdPtr131, },
	};
	uint8_t *disable_dimm_upds[MRC_CHANNELS] = {
		&mem_cfg->DisableDimmMc0Ch0,
		&mem_cfg->DisableDimmMc0Ch1,
		&mem_cfg->DisableDimmMc0Ch2,
		&mem_cfg->DisableDimmMc0Ch3,
		&mem_cfg->DisableDimmMc1Ch0,
		&mem_cfg->DisableDimmMc1Ch1,
		&mem_cfg->DisableDimmMc1Ch2,
		&mem_cfg->DisableDimmMc1Ch3,
	};
	int ch, dimm;

	mem_cfg->MemorySpdDataLen = data->spd_len;

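	/* Copy each slot's SPD pointer; a zero pointer means the slot is empty. */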
	for (ch = 0; ch < MRC_CHANNELS; ch++) {
		uint8_t *disable_dimm_ptr = disable_dimm_upds[ch];
		*disable_dimm_ptr = 0;

		for (dimm = 0; dimm < CONFIG_DIMMS_PER_CHANNEL; dimm++) {
			uint32_t *spd_ptr = spd_upds[ch][dimm];

			*spd_ptr = data->spd[ch][dimm];
			if (!*spd_ptr)
				*disable_dimm_ptr |= BIT(dimm);
		}
	}
}

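/*
 * Common helper for the DQ and DQS map UPDs: copy the per-channel map for every
 * populated channel and zero out the UPDs of unpopulated channels.
 */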
static void mem_init_dq_dqs_upds(void *upds[MRC_CHANNELS], const void *map, size_t upd_size,
				 const struct mem_channel_data *data)
{
	size_t i;

	for (i = 0; i < MRC_CHANNELS; i++, map += upd_size) {
		if (channel_is_populated(i, MRC_CHANNELS, data->ch_population_flags))
			memcpy(upds[i], map, upd_size);
		else
			memset(upds[i], 0, upd_size);
	}
}

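/* Program the CPU-to-DRAM DQ mapping UPDs from the board's DQ map. */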
static void mem_init_dq_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
			     const struct mb_cfg *mb_cfg)
{
	void *dq_upds[MRC_CHANNELS] = {
		&mem_cfg->DqMapCpu2DramMc0Ch0,
		&mem_cfg->DqMapCpu2DramMc0Ch1,
		&mem_cfg->DqMapCpu2DramMc0Ch2,
		&mem_cfg->DqMapCpu2DramMc0Ch3,
		&mem_cfg->DqMapCpu2DramMc1Ch0,
		&mem_cfg->DqMapCpu2DramMc1Ch1,
		&mem_cfg->DqMapCpu2DramMc1Ch2,
		&mem_cfg->DqMapCpu2DramMc1Ch3,
	};

	const size_t upd_size = sizeof(mem_cfg->DqMapCpu2DramMc0Ch0);

	_Static_assert(sizeof(mem_cfg->DqMapCpu2DramMc0Ch0) == CONFIG_MRC_CHANNEL_WIDTH,
		       "Incorrect DQ UPD size!");

	mem_init_dq_dqs_upds(dq_upds, mb_cfg->dq_map, upd_size, data);
}

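/*
 * Program the CPU-to-DRAM DQS mapping UPDs from the board's DQS map. There is one
 * DQS strobe per byte lane, hence the UPD is CONFIG_MRC_CHANNEL_WIDTH / 8 bytes.
 */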
static void mem_init_dqs_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
			      const struct mb_cfg *mb_cfg)
{
	void *dqs_upds[MRC_CHANNELS] = {
		&mem_cfg->DqsMapCpu2DramMc0Ch0,
		&mem_cfg->DqsMapCpu2DramMc0Ch1,
		&mem_cfg->DqsMapCpu2DramMc0Ch2,
		&mem_cfg->DqsMapCpu2DramMc0Ch3,
		&mem_cfg->DqsMapCpu2DramMc1Ch0,
		&mem_cfg->DqsMapCpu2DramMc1Ch1,
		&mem_cfg->DqsMapCpu2DramMc1Ch2,
		&mem_cfg->DqsMapCpu2DramMc1Ch3,
	};

	const size_t upd_size = sizeof(mem_cfg->DqsMapCpu2DramMc0Ch0);

	_Static_assert(sizeof(mem_cfg->DqsMapCpu2DramMc0Ch0) == CONFIG_MRC_CHANNEL_WIDTH / 8,
		       "Incorrect DQS UPD size!");

	mem_init_dq_dqs_upds(dqs_upds, mb_cfg->dqs_map, upd_size, data);
}

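/*
 * Entry point for mainboard memory configuration: validates the memory type,
 * resolves per-channel SPD data and programs the FSP-M memory UPDs accordingly.
 *
 * A minimal usage sketch (the mb_cfg/spd_info contents below are hypothetical,
 * board-specific data):
 *
 *	static const struct mb_cfg board_cfg = {
 *		.type = MEM_TYPE_LP4X,
 *		// ... DQ/DQS maps, ect, etc.
 *	};
 *
 *	void mainboard_memory_init_params(FSPM_UPD *memupd)
 *	{
 *		const struct mem_spd spd_info = {
 *			// ... SPD source for this board
 *		};
 *		memcfg_init(memupd, &board_cfg, &spd_info, false);
 *	}
 */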
void memcfg_init(FSPM_UPD *memupd, const struct mb_cfg *mb_cfg,
		 const struct mem_spd *spd_info, bool half_populated)
{
	struct mem_channel_data data;
	FSP_M_CONFIG *mem_cfg = &memupd->FspmConfig;

	if (mb_cfg->type >= ARRAY_SIZE(soc_mem_cfg))
		die("Invalid memory type(%x)!\n", mb_cfg->type);

	mem_populate_channel_data(memupd, &soc_mem_cfg[mb_cfg->type], spd_info, half_populated,
				  &data);
	mem_init_spd_upds(mem_cfg, &data);
	mem_init_dq_upds(mem_cfg, &data, mb_cfg);
	mem_init_dqs_upds(mem_cfg, &data, mb_cfg);

	mem_cfg->ECT = mb_cfg->ect;

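	/* DQ pin interleaving is a DDR4 board-design option; LP4x always runs non-interleaved. */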
	switch (mb_cfg->type) {
	case MEM_TYPE_DDR4:
		mem_cfg->DqPinsInterleaved = mb_cfg->ddr4_config.dq_pins_interleaved;
		break;
	case MEM_TYPE_LP4X:
		/* LPDDR4x does not allow interleaved memory */
		mem_cfg->DqPinsInterleaved = 0;
		break;
	default:
		die("Unsupported memory type(%d)\n", mb_cfg->type);
	}
}