blob: 55e892c941798281bd10e725c4d2ea32683265e9 [file] [log] [blame]
Angel Pons0612b272020-04-05 15:46:56 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Subrata Banik01ae11b2017-03-04 23:32:41 +05302
Arthur Heymans08769c62022-05-09 14:33:15 +02003#include <acpi/acpigen.h>
Subrata Banik7609c652017-05-19 14:50:09 +05304#include <cbmem.h>
Subrata Banikb6df6b02020-01-03 15:29:02 +05305#include <console/console.h>
Furquan Shaikhcc35f722020-05-12 16:25:31 -07006#include <cpu/cpu.h>
Subrata Banik7609c652017-05-19 14:50:09 +05307#include <device/device.h>
8#include <device/pci.h>
9#include <device/pci_ids.h>
Werner Zehd12530c2018-12-14 13:09:12 +010010#include <intelblocks/acpi.h>
Subrata Banikb6df6b02020-01-03 15:29:02 +053011#include <intelblocks/cfg.h>
Subrata Banik01ae11b2017-03-04 23:32:41 +053012#include <intelblocks/systemagent.h>
Lijian Zhao357e5522019-04-11 13:07:00 -070013#include <smbios.h>
Subrata Banik7609c652017-05-19 14:50:09 +053014#include <soc/iomap.h>
Kyösti Mälkkid6c57142020-12-21 15:17:01 +020015#include <soc/nvs.h>
Subrata Banik01ae11b2017-03-04 23:32:41 +053016#include <soc/pci_devs.h>
Subrata Banik7609c652017-05-19 14:50:09 +053017#include <soc/systemagent.h>
Patrick Rudolph5e007802020-07-27 15:37:43 +020018#include <types.h>
Subrata Banik7609c652017-05-19 14:50:09 +053019#include "systemagent_def.h"
Subrata Banik01ae11b2017-03-04 23:32:41 +053020
/*
 * SoC override function: weak default that does nothing. SoC code
 * overrides this to perform extra system-agent setup at device init.
 */
__weak void soc_systemagent_init(struct device *dev)
{
	/* no-op */
}
26
/*
 * Weak default: SoC code overrides this to add its SoC-specific fixed
 * MMIO resources, continuing the numbering from *resource_cnt.
 */
__weak void soc_add_fixed_mmio_resources(struct device *dev,
		int *resource_cnt)
{
	/* no-op */
}
32
/*
 * Weak default: reports that no uncore PRMRR base/mask is available.
 * SoCs that provide this register pair override the function and
 * return 0 on success.
 */
__weak int soc_get_uncore_prmmr_base_and_mask(uint64_t *base,
		uint64_t *mask)
{
	/* return failure for this dummy API */
	return -1;
}
39
/*
 * Weak default: no SoC-specific system-agent ACPI tables to write;
 * return 'current' unchanged so the table area is not advanced.
 */
__weak unsigned long sa_write_acpi_tables(const struct device *dev,
					  unsigned long current,
					  struct acpi_rsdp *rsdp)
{
	return current;
}
46
/*
 * Weak default for the maximum memory capacity of one channel, in MiB.
 * SoCs decode capid0_a_ddrsz (CAPID0_A DDR-size field) and override.
 */
__weak uint32_t soc_systemagent_max_chan_capacity_mib(u8 capid0_a_ddrsz)
{
	return 32768; /* 32 GiB per channel */
}
51
Angel Pons6724ba42021-01-31 15:06:59 +010052static uint8_t sa_get_ecc_type(const uint32_t capid0_a)
Patrick Rudolph5e007802020-07-27 15:37:43 +020053{
Angel Pons6724ba42021-01-31 15:06:59 +010054 return capid0_a & CAPID_ECCDIS ? MEMORY_ARRAY_ECC_NONE : MEMORY_ARRAY_ECC_SINGLE_BIT;
Patrick Rudolph5e007802020-07-27 15:37:43 +020055}
56
57static size_t sa_slots_per_channel(const uint32_t capid0_a)
58{
59 return !(capid0_a & CAPID_DDPCD) + 1;
60}
61
62static size_t sa_number_of_channels(const uint32_t capid0_a)
63{
64 return !(capid0_a & CAPID_PDCD) + 1;
65}
66
/*
 * Device init hook for the system agent: run the SoC-specific hook,
 * then fill the SMBIOS memory info in CBMEM (ECC type, maximum
 * capacity, device count) from the CAPID0_A capability register.
 */
static void sa_soc_systemagent_init(struct device *dev)
{
	soc_systemagent_init(dev);

	/* Nothing to update if romstage did not publish memory info. */
	struct memory_info *m = cbmem_find(CBMEM_ID_MEMINFO);
	if (m == NULL)
		return;

	const uint32_t capid0_a = pci_read_config32(dev, CAPID0_A);

	m->ecc_type = sa_get_ecc_type(capid0_a);
	/* Total capacity = per-channel maximum times channel count. */
	m->max_capacity_mib = soc_systemagent_max_chan_capacity_mib(CAPID_DDRSZ(capid0_a)) *
			      sa_number_of_channels(capid0_a);
	m->number_of_devices = sa_slots_per_channel(capid0_a) *
			       sa_number_of_channels(capid0_a);
}
83
Subrata Banik7609c652017-05-19 14:50:09 +053084/*
85 * Add all known fixed MMIO ranges that hang off the host bridge/memory
86 * controller device.
87 */
88void sa_add_fixed_mmio_resources(struct device *dev, int *resource_cnt,
89 const struct sa_mmio_descriptor *sa_fixed_resources, size_t count)
90{
91 int i;
92 int index = *resource_cnt;
93
94 for (i = 0; i < count; i++) {
95 uintptr_t base;
96 size_t size;
97
98 size = sa_fixed_resources[i].size;
99 base = sa_fixed_resources[i].base;
100
101 mmio_resource(dev, index++, base / KiB, size / KiB);
102 }
103
104 *resource_cnt = index;
105}
106
/*
 * DRAM memory mapped register
 *
 * TOUUD: This 64 bit register defines the Top of Upper Usable DRAM
 * TOLUD: This 32 bit register defines the Top of Low Usable DRAM
 * BGSM: This register contains the base address of stolen DRAM memory for GTT
 * TSEG: This register contains the base address of TSEG DRAM memory
 *
 * Order must match the SA_*_REG indices used to address this table.
 */
static const struct sa_mem_map_descriptor sa_memory_map[MAX_MAP_ENTRIES] = {
	{ TOUUD, true, "TOUUD" },	/* only 64-bit entry in the map */
	{ TOLUD, false, "TOLUD" },
	{ BGSM, false, "BGSM" },
	{ TSEG, false, "TSEG" },
};
121
122/* Read DRAM memory map register value through PCI configuration space */
Arthur Heymans08769c62022-05-09 14:33:15 +0200123static void sa_read_map_entry(const struct device *dev,
Subrata Banik7609c652017-05-19 14:50:09 +0530124 const struct sa_mem_map_descriptor *entry, uint64_t *result)
125{
126 uint64_t value = 0;
127
128 if (entry->is_64_bit) {
129 value = pci_read_config32(dev, entry->reg + 4);
130 value <<= 32;
131 }
132
133 value |= pci_read_config32(dev, entry->reg);
134 /* All registers are on a 1MiB granularity. */
135 value = ALIGN_DOWN(value, 1 * MiB);
136
137 *result = value;
138}
139
140static void sa_get_mem_map(struct device *dev, uint64_t *values)
141{
142 int i;
143 for (i = 0; i < MAX_MAP_ENTRIES; i++)
144 sa_read_map_entry(dev, &sa_memory_map[i], &values[i]);
145}
146
/*
 * These are the host memory ranges that should be added:
 * - 0 -> 0xa0000: cacheable
 * - 0xc0000 -> top_of_ram : cacheable
 * - top_of_ram -> TOLUD: not cacheable with standard MTRRs and reserved
 * - 4GiB -> TOUUD: cacheable
 *
 * The default SMRAM space is reserved so that the range doesn't
 * have to be saved during S3 Resume. Once marked reserved the OS
 * cannot use the memory. This is a bit of an odd place to reserve
 * the region, but the CPU devices don't have dev_ops->read_resources()
 * called on them.
 *
 * The range 0xa0000 -> 0xc0000 does not have any resources
 * associated with it to handle legacy VGA memory. If this range
 * is not omitted the mtrr code will setup the area as cacheable
 * causing VGA access to not work.
 *
 * Don't need to mark the entire top_of_ram till TOLUD range (used
 * for stolen memory like GFX and ME, PTT, DPR, PRMRR, TSEG etc) as
 * cacheable for OS usage as coreboot already done with mpinit w/ smm
 * relocation early.
 *
 * It should be noted that cacheable entry types need to be added in
 * order. The reason is that the current MTRR code assumes this and
 * falls over itself if it isn't.
 *
 * The resource index starts low and should not meet or exceed
 * PCI_BASE_ADDRESS_0.
 */
static void sa_add_dram_resources(struct device *dev, int *resource_count)
{
	uintptr_t base_k, touud_k;
	size_t size_k;
	uint64_t sa_map_values[MAX_MAP_ENTRIES];
	uintptr_t top_of_ram;
	int index = *resource_count;

	top_of_ram = (uintptr_t)cbmem_top();

	/* 0 - > 0xa0000 */
	base_k = 0;
	size_k = (0xa0000 / KiB) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* 0xc0000 -> top_of_ram */
	base_k = 0xc0000 / KiB;
	size_k = (top_of_ram / KiB) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	sa_get_mem_map(dev, &sa_map_values[0]);

	/* top_of_ram -> TOLUD (stolen memory: marked MMIO, not usable RAM) */
	base_k = top_of_ram;
	size_k = sa_map_values[SA_TOLUD_REG] - base_k;
	mmio_resource(dev, index++, base_k / KiB, size_k / KiB);

	/* 4GiB -> TOUUD (only present when usable DRAM extends above 4GiB) */
	base_k = 4 * (GiB / KiB); /* 4GiB */
	touud_k = sa_map_values[SA_TOUUD_REG] / KiB;
	size_k = touud_k - base_k;
	if (touud_k > base_k)
		ram_resource(dev, index++, base_k, size_k);

	/*
	 * Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */
	mmio_resource(dev, index++, 0xa0000 / KiB, (0xc0000 - 0xa0000) / KiB);
	reserved_ram_resource(dev, index++, 0xc0000 / KiB,
			      (1*MiB - 0xc0000) / KiB);

	*resource_count = index;
}
223
/* An IMR is enabled when bit 31 of its MCH base register is set. */
static bool is_imr_enabled(uint32_t imr_base_reg)
{
	/*
	 * Use an unsigned literal: (1 << 31) shifts into the sign bit of a
	 * signed int, which is undefined behavior (CERT INT32-C).
	 */
	return !!(imr_base_reg & (1u << 31));
}
228
Elyes HAOUAS4a131262018-09-16 17:35:48 +0200229static void imr_resource(struct device *dev, int idx, uint32_t base,
230 uint32_t mask)
Subrata Banik7609c652017-05-19 14:50:09 +0530231{
232 uint32_t base_k, size_k;
233 /* Bits 28:0 encode the base address bits 38:10, hence the KiB unit. */
234 base_k = (base & 0x0fffffff);
235 /* Bits 28:0 encode the AND mask used for comparison, in KiB. */
236 size_k = ((~mask & 0x0fffffff) + 1);
237 /*
238 * IMRs sit in lower DRAM. Mark them cacheable, otherwise we run
239 * out of MTRRs. Memory reserved by IMRs is not usable for host
240 * so mark it reserved.
241 */
242 reserved_ram_resource(dev, idx, base_k, size_k);
243}
244
/*
 * Add IMR ranges that hang off the host bridge/memory
 * controller device in case CONFIG(SA_ENABLE_IMR) is selected by SoC.
 *
 * Walks all MCH_NUM_IMRS base/mask register pairs in MCHBAR and adds a
 * reserved resource for each IMR whose enable bit is set.
 */
static void sa_add_imr_resources(struct device *dev, int *resource_cnt)
{
	size_t i, imr_offset;
	uint32_t base, mask;
	int index = *resource_cnt;

	for (i = 0; i < MCH_NUM_IMRS; i++) {
		/* Each IMR's registers are MCH_IMR_PITCH bytes apart. */
		imr_offset = i * MCH_IMR_PITCH;
		base = MCHBAR32(imr_offset + MCH_IMR0_BASE);
		mask = MCHBAR32(imr_offset + MCH_IMR0_MASK);

		if (is_imr_enabled(base))
			imr_resource(dev, index++, base, mask);
	}

	*resource_cnt = index;
}
266
/*
 * read_resources handler for the system agent: standard PCI BARs plus
 * fixed MMIO, DRAM, optional IMR and extended-BIOS-window resources.
 */
static void systemagent_read_resources(struct device *dev)
{
	int index = 0;

	/* Read standard PCI resources. */
	pci_dev_read_resources(dev);

	/* Add all fixed MMIO resources. */
	soc_add_fixed_mmio_resources(dev, &index);
	/* Calculate and add DRAM resources. */
	sa_add_dram_resources(dev, &index);
	if (CONFIG(SA_ENABLE_IMR))
		/* Add the isolated memory ranges (IMRs). */
		sa_add_imr_resources(dev, &index);

	/* Reserve the window used for extended BIOS decoding. */
	if (CONFIG(FAST_SPI_SUPPORTS_EXT_BIOS_WINDOW))
		mmio_resource(dev, index++, CONFIG_EXT_BIOS_WIN_BASE / KiB,
			      CONFIG_EXT_BIOS_WIN_SIZE / KiB);
}
287
288void enable_power_aware_intr(void)
289{
290 uint8_t pair;
291
292 /* Enable Power Aware Interrupt Routing */
293 pair = MCHBAR8(MCH_PAIR);
294 pair &= ~0x7; /* Clear 2:0 */
295 pair |= 0x4; /* Fixed Priority */
296 MCHBAR8(MCH_PAIR) = pair;
297}
298
/* Set the PAM lock bit so the PAM registers can no longer be changed. */
void sa_lock_pam(void)
{
	const struct device *dev = pcidev_path_on_root(SA_DEVFN_ROOT);
	/* Nothing to lock if the host bridge device is not present. */
	if (!dev)
		return;

	pci_or_config8(dev, PAM0, PAM_LOCK);
}
307
/*
 * Publish the above-4GiB PCI window to the SSDT for the PCI domain:
 * A4GB = window base (TOUUD), A4GS = window size (up to the CPU's
 * physical address limit).
 */
void ssdt_set_above_4g_pci(const struct device *dev)
{
	/* Only meaningful for the PCI domain device's ACPI scope. */
	if (dev->path.type != DEVICE_PATH_DOMAIN)
		return;

	uint64_t touud;
	sa_read_map_entry(pcidev_path_on_root(SA_DEVFN_ROOT), &sa_memory_map[SA_TOUUD_REG],
			  &touud);
	/* Window extends from TOUUD to the top of the CPU address space. */
	const uint64_t len = POWER_OF_2(cpu_phys_address_size()) - touud;

	const char *scope = acpi_device_path(dev);
	acpigen_write_scope(scope);
	acpigen_write_name_qword("A4GB", touud);
	acpigen_write_name_qword("A4GS", len);
	acpigen_pop_len();

	printk(BIOS_DEBUG, "PCI space above 4GB MMIO is at 0x%llx, len = 0x%llx\n", touud, len);
}
326
/* Device operations for the system agent (host bridge) PCI device. */
static struct device_operations systemagent_ops = {
	.read_resources = systemagent_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = sa_soc_systemagent_init,
	.ops_pci = &pci_dev_ops_pci,
#if CONFIG(HAVE_ACPI_TABLES)
	.write_acpi_tables = sa_write_acpi_tables,
#endif
};
337
/* PCI device IDs of all supported host bridges, grouped by platform. */
static const unsigned short systemagent_ids[] = {
	/* Meteor Lake */
	PCI_DID_INTEL_MTL_M_ID,
	PCI_DID_INTEL_MTL_P_ID_1,
	PCI_DID_INTEL_MTL_P_ID_2,
	/* Gemini Lake / Apollo Lake */
	PCI_DID_INTEL_GLK_NB,
	PCI_DID_INTEL_APL_NB,
	/* Cannon Lake */
	PCI_DID_INTEL_CNL_ID_U,
	PCI_DID_INTEL_CNL_ID_Y,
	/* Skylake */
	PCI_DID_INTEL_SKL_ID_U,
	PCI_DID_INTEL_SKL_ID_Y,
	PCI_DID_INTEL_SKL_ID_ULX,
	PCI_DID_INTEL_SKL_ID_H_4,
	PCI_DID_INTEL_SKL_ID_H_2,
	PCI_DID_INTEL_SKL_ID_S_2,
	PCI_DID_INTEL_SKL_ID_S_4,
	/* Whiskey Lake */
	PCI_DID_INTEL_WHL_ID_W_2,
	PCI_DID_INTEL_WHL_ID_W_4,
	/* Kaby Lake (and Skylake H embedded) */
	PCI_DID_INTEL_KBL_ID_S,
	PCI_DID_INTEL_SKL_ID_H_EM,
	PCI_DID_INTEL_KBL_ID_U,
	PCI_DID_INTEL_KBL_ID_Y,
	PCI_DID_INTEL_KBL_ID_H,
	PCI_DID_INTEL_KBL_U_R,
	PCI_DID_INTEL_KBL_ID_DT,
	PCI_DID_INTEL_KBL_ID_DT_2,
	/* Coffee Lake */
	PCI_DID_INTEL_CFL_ID_U,
	PCI_DID_INTEL_CFL_ID_U_2,
	PCI_DID_INTEL_CFL_ID_H,
	PCI_DID_INTEL_CFL_ID_H_4,
	PCI_DID_INTEL_CFL_ID_H_8,
	PCI_DID_INTEL_CFL_ID_S,
	PCI_DID_INTEL_CFL_ID_S_DT_2,
	PCI_DID_INTEL_CFL_ID_S_DT_4,
	PCI_DID_INTEL_CFL_ID_S_DT_8,
	PCI_DID_INTEL_CFL_ID_S_WS_4,
	PCI_DID_INTEL_CFL_ID_S_WS_6,
	PCI_DID_INTEL_CFL_ID_S_WS_8,
	PCI_DID_INTEL_CFL_ID_S_S_4,
	PCI_DID_INTEL_CFL_ID_S_S_6,
	PCI_DID_INTEL_CFL_ID_S_S_8,
	/* Ice Lake */
	PCI_DID_INTEL_ICL_ID_U,
	PCI_DID_INTEL_ICL_ID_U_2_2,
	PCI_DID_INTEL_ICL_ID_Y,
	PCI_DID_INTEL_ICL_ID_Y_2,
	/* Comet Lake */
	PCI_DID_INTEL_CML_ULT,
	PCI_DID_INTEL_CML_ULT_2_2,
	PCI_DID_INTEL_CML_ULT_6_2,
	PCI_DID_INTEL_CML_ULX,
	PCI_DID_INTEL_CML_S,
	PCI_DID_INTEL_CML_S_G0G1_P0P1_6_2,
	PCI_DID_INTEL_CML_S_P0P1_8_2,
	PCI_DID_INTEL_CML_S_P0P1_10_2,
	PCI_DID_INTEL_CML_S_G0G1_4,
	PCI_DID_INTEL_CML_S_G0G1_2,
	PCI_DID_INTEL_CML_H,
	PCI_DID_INTEL_CML_H_4_2,
	PCI_DID_INTEL_CML_H_8_2,
	/* Tiger Lake */
	PCI_DID_INTEL_TGL_ID_U_2_2,
	PCI_DID_INTEL_TGL_ID_U_4_2,
	PCI_DID_INTEL_TGL_ID_Y_2_2,
	PCI_DID_INTEL_TGL_ID_Y_4_2,
	PCI_DID_INTEL_TGL_ID_H_6_1,
	PCI_DID_INTEL_TGL_ID_H_8_1,
	/* Elkhart Lake */
	PCI_DID_INTEL_EHL_ID_0,
	PCI_DID_INTEL_EHL_ID_1,
	PCI_DID_INTEL_EHL_ID_1A,
	PCI_DID_INTEL_EHL_ID_2,
	PCI_DID_INTEL_EHL_ID_2_1,
	PCI_DID_INTEL_EHL_ID_3,
	PCI_DID_INTEL_EHL_ID_3A,
	PCI_DID_INTEL_EHL_ID_4,
	PCI_DID_INTEL_EHL_ID_5,
	PCI_DID_INTEL_EHL_ID_6,
	PCI_DID_INTEL_EHL_ID_7,
	PCI_DID_INTEL_EHL_ID_8,
	PCI_DID_INTEL_EHL_ID_9,
	PCI_DID_INTEL_EHL_ID_10,
	PCI_DID_INTEL_EHL_ID_11,
	PCI_DID_INTEL_EHL_ID_12,
	PCI_DID_INTEL_EHL_ID_13,
	PCI_DID_INTEL_EHL_ID_14,
	PCI_DID_INTEL_EHL_ID_15,
	/* Jasper Lake */
	PCI_DID_INTEL_JSL_ID_1,
	PCI_DID_INTEL_JSL_ID_2,
	PCI_DID_INTEL_JSL_ID_3,
	PCI_DID_INTEL_JSL_ID_4,
	PCI_DID_INTEL_JSL_ID_5,
	/* Alder Lake */
	PCI_DID_INTEL_ADL_S_ID_1,
	PCI_DID_INTEL_ADL_S_ID_2,
	PCI_DID_INTEL_ADL_S_ID_3,
	PCI_DID_INTEL_ADL_S_ID_4,
	PCI_DID_INTEL_ADL_S_ID_5,
	PCI_DID_INTEL_ADL_S_ID_6,
	PCI_DID_INTEL_ADL_S_ID_7,
	PCI_DID_INTEL_ADL_S_ID_8,
	PCI_DID_INTEL_ADL_S_ID_9,
	PCI_DID_INTEL_ADL_S_ID_10,
	PCI_DID_INTEL_ADL_S_ID_11,
	PCI_DID_INTEL_ADL_S_ID_12,
	PCI_DID_INTEL_ADL_S_ID_13,
	PCI_DID_INTEL_ADL_S_ID_14,
	PCI_DID_INTEL_ADL_S_ID_15,
	PCI_DID_INTEL_ADL_P_ID_1,
	PCI_DID_INTEL_ADL_P_ID_3,
	PCI_DID_INTEL_ADL_P_ID_4,
	PCI_DID_INTEL_ADL_P_ID_5,
	PCI_DID_INTEL_ADL_P_ID_6,
	PCI_DID_INTEL_ADL_P_ID_7,
	PCI_DID_INTEL_ADL_P_ID_8,
	PCI_DID_INTEL_ADL_P_ID_9,
	PCI_DID_INTEL_ADL_P_ID_10,
	PCI_DID_INTEL_ADL_M_ID_1,
	PCI_DID_INTEL_ADL_M_ID_2,
	PCI_DID_INTEL_ADL_N_ID_1,
	PCI_DID_INTEL_ADL_N_ID_2,
	PCI_DID_INTEL_ADL_N_ID_3,
	PCI_DID_INTEL_ADL_N_ID_4,
	/* Raptor Lake */
	PCI_DID_INTEL_RPL_P_ID_1,
	PCI_DID_INTEL_RPL_P_ID_2,
	0	/* sentinel: terminates the ID list */
};
459
/* Bind systemagent_ops to any Intel host bridge with an ID listed above. */
static const struct pci_driver systemagent_driver __pci_driver = {
	.ops = &systemagent_ops,
	.vendor = PCI_VID_INTEL,
	.devices = systemagent_ids
};