/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpi.h>
#include <acpi/acpi_crat.h>
#include <acpi/acpi_ivrs.h>
#include <arch/cpu.h>
#include <cpu/amd/cpuid.h>
#include <cpu/amd/msr.h>
#include <FspGuids.h>
#include <soc/acpi.h>
#include <stdint.h>
#include <string.h>
#include <device/device.h>
#include <device/pci_def.h>
#include <device/pci_ops.h>
#include <amdblocks/acpi.h>
#include <amdblocks/cpu.h>
#include <amdblocks/data_fabric.h>
#include <amdblocks/ioapic.h>
#include <soc/data_fabric.h>
#include <soc/pci_devs.h>
#include <arch/mmio.h>

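/*
 * CRAT (Component Resource Affinity Table) describes the HSA topology of the
 * APU and is consumed by HSA-aware software such as the Linux amdkfd driver.
 * The helpers below emit its processing-unit, memory, cache and TLB entries.
 */

/* Emit one HSA processing unit entry describing the enabled CPU cores. */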
static unsigned long gen_crat_hsa_entry(struct acpi_crat_header *crat, unsigned long current)
{
	struct crat_hsa_processing_unit *hsa_entry = (struct crat_hsa_processing_unit *)current;
	memset(hsa_entry, 0, sizeof(struct crat_hsa_processing_unit));

	hsa_entry->flags = CRAT_HSA_PR_FLAG_EN | CRAT_HSA_PR_FLAG_CPU_PRES;
	hsa_entry->wave_front_size = 4;
	hsa_entry->num_cpu_cores = get_cpu_count();
	hsa_entry->length = sizeof(struct crat_hsa_processing_unit);
	crat->total_entries++;

	current += hsa_entry->length;
	return current;
}

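/*
 * Emit one CRAT memory affinity entry for the DRAM region starting at
 * region_base with length region_size, assigned to the given proximity domain.
 */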
static unsigned long create_crat_memory_entry(uint32_t domain, uint64_t region_base,
					      uint64_t region_size, unsigned long current)
{
	struct crat_memory *mem_affinity = (struct crat_memory *)current;
	memset(mem_affinity, 0, sizeof(struct crat_memory));

	mem_affinity->type = CRAT_MEMORY_TYPE;
	mem_affinity->length = sizeof(struct crat_memory);
	mem_affinity->proximity_domain = domain;
	mem_affinity->base_address_low = region_base & 0xffffffff;
	mem_affinity->base_address_high = (region_base >> 32) & 0xffffffff;
	mem_affinity->length_low = region_size & 0xffffffff;
	mem_affinity->length_high = (region_size >> 32) & 0xffffffff;
	mem_affinity->flags = CRAT_MEM_FLAG_EN;
	mem_affinity->width = 64;

	current += mem_affinity->length;
	return current;
}

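/*
 * Walk the data fabric DRAM base/limit register pairs and emit CRAT memory
 * entries for every valid DRAM region, splitting the first region around the
 * legacy VGA area and skipping the MMIO hole below 4 GiB when the DRAM hole
 * is enabled.
 */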
static unsigned long gen_crat_memory_entries(struct acpi_crat_header *crat,
					     unsigned long current)
{
	uint32_t dram_base_reg, dram_limit_reg, dram_hole_ctl;
	uint64_t memory_length, memory_base, hole_base, size_below_hole;
	size_t new_entries = 0;

	for (size_t dram_map_idx = 0; dram_map_idx < PICASSO_NUM_DRAM_REG;
	     dram_map_idx++) {
		dram_base_reg =
			data_fabric_read32(0, DF_DRAM_BASE(dram_map_idx), IOMS0_FABRIC_ID);

		if (dram_base_reg & DRAM_BASE_REG_VALID) {
			dram_limit_reg = data_fabric_read32(0, DF_DRAM_LIMIT(dram_map_idx),
							    IOMS0_FABRIC_ID);
			memory_length =
				((dram_limit_reg & DRAM_LIMIT_ADDR) >> DRAM_LIMIT_ADDR_SHFT) + 1
				- ((dram_base_reg & DRAM_BASE_ADDR) >> DRAM_BASE_ADDR_SHFT);
			memory_length = memory_length << 28;
			memory_base = (uint64_t)(dram_base_reg & DRAM_BASE_ADDR)
				      << (28 - DRAM_BASE_ADDR_SHFT);

			if (memory_base == 0) {
				current =
					create_crat_memory_entry(0, 0ull, 0xa0000ull, current);
				memory_base = 1 * MiB;
				memory_length = memory_base;
				new_entries++;
			}

			if (dram_base_reg & DRAM_BASE_HOLE_EN) {
				dram_hole_ctl = data_fabric_read32(0, D18F0_DRAM_HOLE_CTL,
								   IOMS0_FABRIC_ID);
				hole_base = (dram_hole_ctl & DRAM_HOLE_CTL_BASE);
				size_below_hole = hole_base - memory_base;
				current = create_crat_memory_entry(0, memory_base,
								   size_below_hole, current);
				memory_length = (uint64_t)(((dram_limit_reg & DRAM_LIMIT_ADDR)
							    >> DRAM_LIMIT_ADDR_SHFT)
							   + 1 - 0x10)
						<< 28;
				memory_base = 0x100000000;
				new_entries++;
			}

			current = create_crat_memory_entry(0, memory_base, memory_length,
							   current);
			new_entries++;
		}
	}
	crat->total_entries += new_entries;
	return current;
}

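/* Add an empty, enabled CPU cache entry at 'current'; the caller fills in the details. */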
static unsigned long add_crat_cache_entry(struct crat_cache **cache_affinity,
					  unsigned long current)
{
	*cache_affinity = (struct crat_cache *)current;
	memset(*cache_affinity, 0, sizeof(struct crat_cache));

	(*cache_affinity)->type = CRAT_CACHE_TYPE;
	(*cache_affinity)->length = sizeof(struct crat_cache);
	(*cache_affinity)->flags = CRAT_CACHE_FLAG_EN | CRAT_CACHE_FLAG_CPU_CACHE;

	current += sizeof(struct crat_cache);
	return current;
}

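/*
 * Decode the CPUID L2/L3 associativity encoding into a number of ways
 * (0xFF meaning fully associative); unknown encodings are reported as 0.
 */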
static uint8_t get_associativity(uint32_t encoded_associativity)
{
	uint8_t associativity = 0;

	switch (encoded_associativity) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 4:
		return encoded_associativity;
	case 5:
		associativity = 6;
		break;
	case 6:
		associativity = 8;
		break;
	case 8:
		associativity = 16;
		break;
	case 0xA:
		associativity = 32;
		break;
	case 0xB:
		associativity = 48;
		break;
	case 0xC:
		associativity = 64;
		break;
	case 0xD:
		associativity = 96;
		break;
	case 0xE:
		associativity = 128;
		break;
	case 0xF:
		associativity = 0xFF;
		break;
	default:
		return 0;
	}

	return associativity;
}

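/*
 * Emit cache affinity entries for the L1 data, L1 instruction, L2 and L3
 * caches. One entry is created per group of threads that shares a cache
 * (per the CPUID cache properties sub-leaves), with the sharing threads
 * recorded in the sibling map and the geometry taken from the L1/L2/L3
 * cache identifier leaves.
 */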
static unsigned long gen_crat_cache_entry(struct acpi_crat_header *crat, unsigned long current)
{
	size_t total_num_threads, num_threads_sharing0, num_threads_sharing1,
		num_threads_sharing2, num_threads_sharing3, thread, new_entries;
	struct cpuid_result cache_props0, cache_props1, cache_props2, cache_props3;
	uint8_t sibling_mask = 0;
	uint32_t l1_data_cache_ids, l1_inst_cache_ids, l2_cache_ids, l3_cache_ids;
	struct crat_cache *cache_affinity = NULL;

	total_num_threads = get_cpu_count();

	cache_props0 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_0);
	cache_props1 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_1);
	cache_props2 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_2);
	cache_props3 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_3);

	l1_data_cache_ids = cpuid_ecx(CPUID_L1_TLB_CACHE_IDS);
	l1_inst_cache_ids = cpuid_edx(CPUID_L1_TLB_CACHE_IDS);
	l2_cache_ids = cpuid_ecx(CPUID_L2_L3_CACHE_L2_TLB_IDS);
	l3_cache_ids = cpuid_edx(CPUID_L2_L3_CACHE_L2_TLB_IDS);

	num_threads_sharing0 =
		((cache_props0.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;
	num_threads_sharing1 =
		((cache_props1.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;
	num_threads_sharing2 =
		((cache_props2.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;
	num_threads_sharing3 =
		((cache_props3.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;

	new_entries = 0;
	for (thread = 0; thread < total_num_threads; thread++) {
		/* L1 data cache */
		if (thread % num_threads_sharing0 == 0) {
			current = add_crat_cache_entry(&cache_affinity, current);
			new_entries++;

			cache_affinity->flags |= CRAT_CACHE_FLAG_DATA_CACHE;
			cache_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing0; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			cache_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			cache_affinity->cache_properties =
				(cache_props0.edx & CACHE_INCLUSIVE_MASK) ? 2 : 0;
			cache_affinity->cache_size =
				(l1_data_cache_ids & L1_DC_SIZE_MASK) >> L1_DC_SIZE_SHFT;
			cache_affinity->cache_level = CRAT_L1_CACHE;
			cache_affinity->lines_per_tag =
				(l1_data_cache_ids & L1_DC_LINE_TAG_MASK)
				>> L1_DC_LINE_TAG_SHFT;
			cache_affinity->cache_line_size =
				(l1_data_cache_ids & L1_DC_LINE_SIZE_MASK)
				>> L1_DC_LINE_SIZE_SHFT;
			cache_affinity->associativity =
				(l1_data_cache_ids & L1_DC_ASSOC_MASK) >> L1_DC_ASSOC_SHFT;
			cache_affinity->cache_latency = 1;
		}

		/* L1 instruction cache */
		if (thread % num_threads_sharing1 == 0) {
			current = add_crat_cache_entry(&cache_affinity, current);
			new_entries++;

			cache_affinity->flags |= CRAT_CACHE_FLAG_INSTR_CACHE;
			cache_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing1; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			cache_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			cache_affinity->cache_properties =
				(cache_props1.edx & CACHE_INCLUSIVE_MASK) ? 2 : 0;
			cache_affinity->cache_size =
				(l1_inst_cache_ids & L1_IC_SIZE_MASK) >> L1_IC_SIZE_SHFT;
			cache_affinity->cache_level = CRAT_L1_CACHE;
			cache_affinity->lines_per_tag =
				(l1_inst_cache_ids & L1_IC_LINE_TAG_MASK)
				>> L1_IC_LINE_TAG_SHFT;
			cache_affinity->cache_line_size =
				(l1_inst_cache_ids & L1_IC_LINE_SIZE_MASK)
				>> L1_IC_LINE_SIZE_SHFT;
			cache_affinity->associativity =
				(l1_inst_cache_ids & L1_IC_ASSOC_MASK) >> L1_IC_ASSOC_SHFT;
			cache_affinity->cache_latency = 1;
		}

		/* L2 cache */
		if (thread % num_threads_sharing2 == 0) {
			current = add_crat_cache_entry(&cache_affinity, current);
			new_entries++;

			cache_affinity->flags |=
				CRAT_CACHE_FLAG_DATA_CACHE | CRAT_CACHE_FLAG_INSTR_CACHE;
			cache_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing2; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			cache_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			cache_affinity->cache_properties =
				(cache_props2.edx & CACHE_INCLUSIVE_MASK) ? 2 : 0;
			cache_affinity->cache_size =
				(l2_cache_ids & L2_DC_SIZE_MASK) >> L2_DC_SIZE_SHFT;
			cache_affinity->cache_level = CRAT_L2_CACHE;
			cache_affinity->lines_per_tag =
				(l2_cache_ids & L2_DC_LINE_TAG_MASK) >> L2_DC_LINE_TAG_SHFT;
			cache_affinity->cache_line_size =
				(l2_cache_ids & L2_DC_LINE_SIZE_MASK) >> L2_DC_LINE_SIZE_SHFT;
			cache_affinity->associativity = get_associativity(
				(l2_cache_ids & L2_DC_ASSOC_MASK) >> L2_DC_ASSOC_SHFT);
			cache_affinity->cache_latency = 1;
		}

		/* L3 cache */
		if (thread % num_threads_sharing3 == 0) {
			current = add_crat_cache_entry(&cache_affinity, current);
			new_entries++;

			cache_affinity->flags |=
				CRAT_CACHE_FLAG_DATA_CACHE | CRAT_CACHE_FLAG_INSTR_CACHE;
			cache_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing3; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			cache_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
295 cache_affinity->cache_properties =
296 (cache_props0.edx & CACHE_INCLUSIVE_MASK) ? 2 : 0;
			cache_affinity->cache_size =
				((l3_cache_ids & L3_DC_SIZE_MASK) >> L3_DC_SIZE_SHFT) * 512;
			cache_affinity->cache_level = CRAT_L3_CACHE;
			cache_affinity->lines_per_tag =
				(l3_cache_ids & L3_DC_LINE_TAG_MASK) >> L3_DC_LINE_TAG_SHFT;
			cache_affinity->cache_line_size =
				(l3_cache_ids & L3_DC_LINE_SIZE_MASK) >> L3_DC_LINE_SIZE_SHFT;
			cache_affinity->associativity = get_associativity(
				(l3_cache_ids & L3_DC_ASSOC_MASK) >> L3_DC_ASSOC_SHFT);
			cache_affinity->cache_latency = 1;
		}
	}
	crat->total_entries += new_entries;
	return current;
}

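/*
 * Convert a raw TLB entry count into the CRAT 8-bit size field. Counts of 256
 * or more are reported in units of 256 entries, with the matching base-256
 * flag set on the TLB entry.
 */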
static uint8_t get_tlb_size(enum tlb_type type, struct crat_tlb *crat_tlb_entry,
			    uint16_t raw_assoc_size)
{
	uint8_t tlbsize;

	if (raw_assoc_size >= 256) {
		tlbsize = (uint8_t)(raw_assoc_size / 256);

		if (type == tlb_2m)
			crat_tlb_entry->flags |= CRAT_TLB_FLAG_2MB_BASE_256;
		else if (type == tlb_4k)
			crat_tlb_entry->flags |= CRAT_TLB_FLAG_4K_BASE_256;
		else if (type == tlb_1g)
			crat_tlb_entry->flags |= CRAT_TLB_FLAG_1GB_BASE_256;
	} else {
		tlbsize = (uint8_t)(raw_assoc_size);
	}
	return tlbsize;
}

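/* Add an empty, enabled CPU TLB entry at 'current'; the caller fills in the details. */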
static unsigned long add_crat_tlb_entry(struct crat_tlb **tlb_affinity, unsigned long current)
{
	*tlb_affinity = (struct crat_tlb *)current;
	memset(*tlb_affinity, 0, sizeof(struct crat_tlb));

	(*tlb_affinity)->type = CRAT_TLB_TYPE;
	(*tlb_affinity)->length = sizeof(struct crat_tlb);
	(*tlb_affinity)->flags = CRAT_TLB_FLAG_EN | CRAT_TLB_FLAG_CPU_TLB;

	current += sizeof(struct crat_tlb);
	return current;
}

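/*
 * Emit TLB affinity entries for the L1 data, L1 instruction and L2 TLBs.
 * Thread sharing is taken from the same CPUID cache properties sub-leaves
 * used for the caches; sizes and associativities come from the L1/L2 TLB
 * identifier leaves for 4 KiB, 2 MiB/4 MiB and 1 GiB pages.
 */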
static unsigned long gen_crat_tlb_entry(struct acpi_crat_header *crat, unsigned long current)
{
	size_t total_num_threads, num_threads_sharing0, num_threads_sharing1,
		num_threads_sharing2, thread, new_entries;
	struct cpuid_result cache_props0, cache_props1, cache_props2;
	uint8_t sibling_mask = 0;
	uint32_t l1_tlb_2M4M_ids, l1_tlb_4K_ids, l2_tlb_2M4M_ids, l2_tlb_4K_ids, l1_tlb_1G_ids,
		l2_tlb_1G_ids;
	struct crat_tlb *tlb_affinity = NULL;

	total_num_threads = get_cpu_count();
	cache_props0 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_0);
	cache_props1 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_1);
	cache_props2 = cpuid_ext(CPUID_CACHE_PROPS, CACHE_PROPS_2);

	l1_tlb_2M4M_ids = cpuid_eax(CPUID_L1_TLB_CACHE_IDS);
	l2_tlb_2M4M_ids = cpuid_eax(CPUID_L2_L3_CACHE_L2_TLB_IDS);
	l1_tlb_4K_ids = cpuid_ebx(CPUID_L1_TLB_CACHE_IDS);
	l2_tlb_4K_ids = cpuid_ebx(CPUID_L2_L3_CACHE_L2_TLB_IDS);
	l1_tlb_1G_ids = cpuid_eax(CPUID_TLB_L1L2_1G_IDS);
	l2_tlb_1G_ids = cpuid_ebx(CPUID_TLB_L1L2_1G_IDS);

	num_threads_sharing0 =
		((cache_props0.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;
	num_threads_sharing1 =
		((cache_props1.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;
	num_threads_sharing2 =
		((cache_props2.eax & NUM_SHARE_CACHE_MASK) >> NUM_SHARE_CACHE_SHFT) + 1;

	new_entries = 0;
	for (thread = 0; thread < total_num_threads; thread++) {

		/* L1 data TLB */
		if (thread % num_threads_sharing0 == 0) {
			current = add_crat_tlb_entry(&tlb_affinity, current);
			new_entries++;

			tlb_affinity->flags |= CRAT_TLB_FLAG_DATA_TLB;
			tlb_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing0; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			tlb_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			tlb_affinity->tlb_level = CRAT_L1_CACHE;

			tlb_affinity->data_tlb_2mb_assoc =
				(l1_tlb_2M4M_ids & L1_DAT_TLB_2M4M_ASSOC_MASK)
				>> L1_DAT_TLB_2M4M_ASSOC_SHFT;
			tlb_affinity->data_tlb_2mb_size =
				get_tlb_size(tlb_2m, tlb_affinity,
					     (l1_tlb_2M4M_ids & L1_DAT_TLB_2M4M_SIZE_MASK)
						     >> L1_DAT_TLB_2M4M_SIZE_SHFT);

			tlb_affinity->data_tlb_4k_assoc =
				(l1_tlb_4K_ids & L1_DAT_TLB_4K_ASSOC_MASK)
				>> L1_DAT_TLB_4K_ASSOC_SHFT;
			tlb_affinity->data_tlb_4k_size =
				get_tlb_size(tlb_4k, tlb_affinity,
					     (l1_tlb_4K_ids & L1_DAT_TLB_4K_SIZE_MASK)
						     >> L1_DAT_TLB_4K_SIZE_SHFT);

			tlb_affinity->data_tlb_1g_assoc =
				(l1_tlb_1G_ids & L1_DAT_TLB_1G_ASSOC_MASK)
				>> L1_DAT_TLB_1G_ASSOC_SHFT;
			tlb_affinity->data_tlb_1g_size =
				get_tlb_size(tlb_1g, tlb_affinity,
					     (l1_tlb_1G_ids & L1_DAT_TLB_1G_SIZE_MASK)
						     >> L1_DAT_TLB_1G_SIZE_SHFT);
		}

		/* L1 instruction TLB */
		if (thread % num_threads_sharing1 == 0) {
			current = add_crat_tlb_entry(&tlb_affinity, current);
			new_entries++;

			tlb_affinity->flags |= CRAT_TLB_FLAG_INSTR_TLB;
			tlb_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing1; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			tlb_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			tlb_affinity->tlb_level = CRAT_L1_CACHE;
			tlb_affinity->instr_tlb_2mb_assoc =
				(l1_tlb_2M4M_ids & L1_INST_TLB_2M4M_ASSOC_MASK)
				>> L1_INST_TLB_2M4M_ASSOC_SHFT;
			tlb_affinity->instr_tlb_2mb_size =
				get_tlb_size(tlb_2m, tlb_affinity,
					     (l1_tlb_2M4M_ids & L1_INST_TLB_2M4M_SIZE_MASK)
						     >> L1_INST_TLB_2M4M_SIZE_SHFT);

			tlb_affinity->instr_tlb_4k_assoc =
				(l1_tlb_4K_ids & L1_INST_TLB_4K_ASSOC_MASK)
				>> L1_INST_TLB_4K_ASSOC_SHFT;
			tlb_affinity->instr_tlb_4k_size =
				get_tlb_size(tlb_4k, tlb_affinity,
					     (l1_tlb_4K_ids & L1_INST_TLB_4K_SIZE_MASK)
						     >> L1_INST_TLB_4K_SIZE_SHFT);

			tlb_affinity->instr_tlb_1g_assoc =
				(l1_tlb_1G_ids & L1_INST_TLB_1G_ASSOC_MASK)
				>> L1_INST_TLB_1G_ASSOC_SHFT;
			tlb_affinity->instr_tlb_1g_size =
				get_tlb_size(tlb_1g, tlb_affinity,
					     (l1_tlb_1G_ids & L1_INST_TLB_1G_SIZE_MASK)
						     >> L1_INST_TLB_1G_SIZE_SHFT);
		}

		/* L2 Data TLB */
		if (thread % num_threads_sharing2 == 0) {
			current = add_crat_tlb_entry(&tlb_affinity, current);
			new_entries++;

			tlb_affinity->flags |= CRAT_TLB_FLAG_DATA_TLB;
			tlb_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing2; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			tlb_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			tlb_affinity->tlb_level = CRAT_L2_CACHE;
			tlb_affinity->data_tlb_2mb_assoc =
				(l2_tlb_2M4M_ids & L2_DAT_TLB_2M4M_ASSOC_MASK)
				>> L2_DAT_TLB_2M4M_ASSOC_SHFT;
			tlb_affinity->data_tlb_2mb_size =
				get_tlb_size(tlb_2m, tlb_affinity,
					     (l2_tlb_2M4M_ids & L2_DAT_TLB_2M4M_SIZE_MASK)
						     >> L2_DAT_TLB_2M4M_SIZE_SHFT);

			tlb_affinity->data_tlb_4k_assoc =
				get_associativity((l2_tlb_4K_ids & L2_DAT_TLB_2M4M_ASSOC_MASK)
						  >> L2_DAT_TLB_4K_ASSOC_SHFT);
			tlb_affinity->data_tlb_4k_size =
				get_tlb_size(tlb_4k, tlb_affinity,
					     (l2_tlb_4K_ids & L2_DAT_TLB_4K_SIZE_MASK)
						     >> L2_DAT_TLB_4K_SIZE_SHFT);

			tlb_affinity->data_tlb_1g_assoc =
				get_associativity((l2_tlb_1G_ids & L2_DAT_TLB_1G_ASSOC_MASK)
						  >> L2_DAT_TLB_1G_ASSOC_SHFT);
			tlb_affinity->data_tlb_1g_size =
				get_tlb_size(tlb_1g, tlb_affinity,
					     (l2_tlb_1G_ids & L2_DAT_TLB_1G_SIZE_MASK)
						     >> L2_DAT_TLB_1G_SIZE_SHFT);
		}

		/* L2 Instruction TLB */
		if (thread % num_threads_sharing2 == 0) {
			current = add_crat_tlb_entry(&tlb_affinity, current);
			new_entries++;

			tlb_affinity->flags |= CRAT_TLB_FLAG_INSTR_TLB;
			tlb_affinity->proc_id_low = thread;
			sibling_mask = 1;
			for (size_t sibling = 1; sibling < num_threads_sharing2; sibling++)
				sibling_mask = (sibling_mask << 1) + 1;
			tlb_affinity->sibling_map[thread / 8] = sibling_mask << (thread % 8);
			tlb_affinity->tlb_level = CRAT_L2_CACHE;
			tlb_affinity->instr_tlb_2mb_assoc = get_associativity(
				(l2_tlb_2M4M_ids & L2_INST_TLB_2M4M_ASSOC_MASK)
				>> L2_INST_TLB_2M4M_ASSOC_SHFT);
			tlb_affinity->instr_tlb_2mb_size =
				get_tlb_size(tlb_2m, tlb_affinity,
					     (l2_tlb_2M4M_ids & L2_INST_TLB_2M4M_SIZE_MASK)
						     >> L2_INST_TLB_2M4M_SIZE_SHFT);

			tlb_affinity->instr_tlb_4k_assoc =
				get_associativity((l2_tlb_4K_ids & L2_INST_TLB_4K_ASSOC_MASK)
						  >> L2_INST_TLB_4K_ASSOC_SHFT);
			tlb_affinity->instr_tlb_4k_size =
				get_tlb_size(tlb_4k, tlb_affinity,
					     (l2_tlb_4K_ids & L2_INST_TLB_4K_SIZE_MASK)
						     >> L2_INST_TLB_4K_SIZE_SHFT);

			tlb_affinity->instr_tlb_1g_assoc =
				get_associativity((l2_tlb_1G_ids & L2_INST_TLB_1G_ASSOC_MASK)
						  >> L2_INST_TLB_1G_ASSOC_SHFT);
			tlb_affinity->instr_tlb_1g_size =
				get_tlb_size(tlb_1g, tlb_affinity,
					     (l2_tlb_1G_ids & L2_INST_TLB_1G_SIZE_MASK)
						     >> L2_INST_TLB_1G_SIZE_SHFT);
		}
	}

	crat->total_entries += new_entries;
	return current;
}

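/* Callback for acpi_create_crat(): fill the CRAT body for this single node. */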
static unsigned long acpi_fill_crat(struct acpi_crat_header *crat, unsigned long current)
{
	current = gen_crat_hsa_entry(crat, current);
	current = gen_crat_memory_entries(crat, current);
	current = gen_crat_cache_entry(crat, current);
	current = gen_crat_tlb_entry(crat, current);
	crat->num_nodes++;

	return current;
}

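/*
 * Publish the SoC-specific ACPI tables: the CRAT, the ALIB SSDT taken from
 * the corresponding FSP HOB, and the IVRS describing the IOMMU.
 */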
uintptr_t agesa_write_acpi_tables(const struct device *device, uintptr_t current,
				  acpi_rsdp_t *rsdp)
{
	acpi_ivrs_t *ivrs;
	struct acpi_crat_header *crat;

	/* CRAT */
	current = ALIGN_UP(current, 8);
	crat = (struct acpi_crat_header *)current;
	acpi_create_crat(crat, acpi_fill_crat);
	current += crat->header.length;
	acpi_add_table(rsdp, crat);

	/* add ALIB SSDT from HOB */
	current = add_agesa_fsp_acpi_table(AMD_FSP_ACPI_ALIB_HOB_GUID, "ALIB", rsdp, current);

	/* IVRS */
	current = ALIGN_UP(current, 8);
	ivrs = (acpi_ivrs_t *)current;
	acpi_create_ivrs(ivrs, acpi_fill_ivrs);
	current += ivrs->header.length;
	acpi_add_table(rsdp, ivrs);

	/* Add SRAT, MSCT, SLIT if needed in the future */

	return current;
}
569}