blob: 903ecdd941ed2b6190ce1620af07c40d8f9d31b4 [file] [log] [blame]
Angel Ponsc74dae92020-04-02 23:48:16 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00002
3#include <console/console.h>
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +02004#include <commonlib/helpers.h>
Duncan Laurie90dcdd42011-10-25 14:15:11 -07005#include <delay.h>
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00006#include <device/device.h>
7#include <device/pci.h>
Nico Huberbba97352022-08-05 13:09:25 +02008#include <device/pci_ids.h>
Patrick Rudolphe56189c2018-04-18 10:11:59 +02009#include <device/pci_ops.h>
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000010#include <device/pciexp.h>
11
/*
 * Walk the PCIe extended capability list starting at `offset` and
 * return the config-space offset of the first capability whose ID
 * matches `cap`, or 0 when the list ends without a match.
 */
static unsigned int pciexp_get_ext_cap_offset(const struct device *dev, unsigned int cap,
					      unsigned int offset)
{
	unsigned int pos = offset;

	while (pos) {
		const unsigned int header = pci_read_config32(dev, pos);

		/* Bail out when this request is unsupported */
		if (header == 0xffffffff)
			break;

		if ((header & 0xffff) == cap)
			return pos;

		/* Bits 31:20 of the header hold the next capability offset. */
		pos = header >> 20;
	}

	return 0;
}
Kenji Chen31c6e632014-10-04 01:14:44 +080032
Nico Huber5ffc2c82022-08-05 12:58:18 +020033/*
34 * Search for an extended capability with the ID `cap`.
35 *
36 * Returns the offset of the first matching extended
37 * capability if found, or 0 otherwise.
38 *
39 * A new search is started with `offset == 0`.
40 * To continue a search, the prior return value
41 * should be passed as `offset`.
42 */
43unsigned int pciexp_find_extended_cap(const struct device *dev, unsigned int cap,
44 unsigned int offset)
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060045{
Nico Huber5ffc2c82022-08-05 12:58:18 +020046 unsigned int next_cap_offset;
47
48 if (offset)
49 next_cap_offset = pci_read_config32(dev, offset) >> 20;
50 else
51 next_cap_offset = PCIE_EXT_CAP_OFFSET;
52
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060053 return pciexp_get_ext_cap_offset(dev, cap, next_cap_offset);
54}
55
Nico Huber9099fea2022-08-05 13:02:52 +020056/*
57 * Search for a vendor-specific extended capability,
58 * with the vendor-specific ID `cap`.
59 *
60 * Returns the offset of the vendor-specific header,
61 * i.e. the offset of the extended capability + 4,
62 * or 0 if none is found.
63 *
64 * A new search is started with `offset == 0`.
65 * To continue a search, the prior return value
66 * should be passed as `offset`.
67 */
68unsigned int pciexp_find_ext_vendor_cap(const struct device *dev, unsigned int cap,
69 unsigned int offset)
70{
71 /* Reconstruct capability offset from vendor-specific header offset. */
72 if (offset >= 4)
73 offset -= 4;
74
75 for (;;) {
76 offset = pciexp_find_extended_cap(dev, PCI_EXT_CAP_ID_VNDR, offset);
77 if (!offset)
78 return 0;
79
80 const unsigned int vndr_cap = pci_read_config32(dev, offset + 4);
81 if ((vndr_cap & 0xffff) == cap)
82 return offset + 4;
83 }
84}
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060085
Duncan Laurie90dcdd42011-10-25 14:15:11 -070086/*
87 * Re-train a PCIe link
88 */
89#define PCIE_TRAIN_RETRY 10000
Martin Roth38ddbfb2019-10-23 21:41:00 -060090static int pciexp_retrain_link(struct device *dev, unsigned int cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -070091{
Youness Alaouibb5fb642017-05-03 17:57:13 -040092 unsigned int try;
Duncan Laurie90dcdd42011-10-25 14:15:11 -070093 u16 lnk;
94
Youness Alaouibb5fb642017-05-03 17:57:13 -040095 /*
96 * Implementation note (page 633) in PCIe Specification 3.0 suggests
97 * polling the Link Training bit in the Link Status register until the
98 * value returned is 0 before setting the Retrain Link bit to 1.
99 * This is meant to avoid a race condition when using the
100 * Retrain Link mechanism.
101 */
102 for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
103 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
104 if (!(lnk & PCI_EXP_LNKSTA_LT))
105 break;
106 udelay(100);
107 }
108 if (try == 0) {
109 printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
110 return -1;
111 }
112
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700113 /* Start link retraining */
114 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
115 lnk |= PCI_EXP_LNKCTL_RL;
116 pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);
117
118 /* Wait for training to complete */
Youness Alaouibb5fb642017-05-03 17:57:13 -0400119 for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700120 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
121 if (!(lnk & PCI_EXP_LNKSTA_LT))
122 return 0;
123 udelay(100);
124 }
125
126 printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
127 return -1;
128}
129
130/*
131 * Check the Slot Clock Configuration for root port and endpoint
132 * and enable Common Clock Configuration if possible. If CCC is
133 * enabled the link must be retrained.
134 */
Martin Roth38ddbfb2019-10-23 21:41:00 -0600135static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
136 struct device *endp, unsigned int endp_cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700137{
138 u16 root_scc, endp_scc, lnkctl;
139
140 /* Get Slot Clock Configuration for root port */
141 root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
142 root_scc &= PCI_EXP_LNKSTA_SLC;
143
144 /* Get Slot Clock Configuration for endpoint */
145 endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
146 endp_scc &= PCI_EXP_LNKSTA_SLC;
147
148 /* Enable Common Clock Configuration and retrain */
149 if (root_scc && endp_scc) {
150 printk(BIOS_INFO, "Enabling Common Clock Configuration\n");
151
152 /* Set in endpoint */
153 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
154 lnkctl |= PCI_EXP_LNKCTL_CCC;
155 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
156
157 /* Set in root port */
158 lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
159 lnkctl |= PCI_EXP_LNKCTL_CCC;
160 pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);
161
162 /* Retrain link if CCC was enabled */
163 pciexp_retrain_link(root, root_cap);
164 }
165}
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700166
Martin Roth38ddbfb2019-10-23 21:41:00 -0600167static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
Kane Chen18cb1342014-10-01 11:13:54 +0800168{
169 /* check if per port clk req is supported in device */
170 u32 endp_ca;
171 u16 lnkctl;
172 endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
173 if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
Arthur Heymans330c46b2017-07-12 19:17:56 +0200174 printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
Kane Chen18cb1342014-10-01 11:13:54 +0800175 return;
176 }
177 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
178 lnkctl = lnkctl | PCI_EXP_EN_CLK_PM;
179 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
180}
Kane Chen18cb1342014-10-01 11:13:54 +0800181
Nico Huber968ef752021-03-07 01:39:18 +0100182static bool _pciexp_ltr_supported(struct device *dev, unsigned int cap)
Kenji Chen31c6e632014-10-04 01:14:44 +0800183{
Nico Huber968ef752021-03-07 01:39:18 +0100184 return pci_read_config16(dev, cap + PCI_EXP_DEVCAP2) & PCI_EXP_DEVCAP2_LTR;
Kenji Chen31c6e632014-10-04 01:14:44 +0800185}
186
Nico Huber968ef752021-03-07 01:39:18 +0100187static bool _pciexp_ltr_enabled(struct device *dev, unsigned int cap)
Aamir Bohra2188f572017-09-22 19:07:21 +0530188{
Nico Huber968ef752021-03-07 01:39:18 +0100189 return pci_read_config16(dev, cap + PCI_EXP_DEVCTL2) & PCI_EXP_DEV2_LTR;
Aamir Bohra2188f572017-09-22 19:07:21 +0530190}
191
Nico Huber968ef752021-03-07 01:39:18 +0100192static bool _pciexp_enable_ltr(struct device *parent, unsigned int parent_cap,
193 struct device *dev, unsigned int cap)
Kenji Chen31c6e632014-10-04 01:14:44 +0800194{
Nico Huber968ef752021-03-07 01:39:18 +0100195 if (!_pciexp_ltr_supported(dev, cap)) {
196 printk(BIOS_DEBUG, "%s: No LTR support\n", dev_path(dev));
197 return false;
Pratik Prajapati0cd0d282015-06-09 12:06:20 -0700198 }
Aamir Bohra2188f572017-09-22 19:07:21 +0530199
Nico Huber968ef752021-03-07 01:39:18 +0100200 if (_pciexp_ltr_enabled(dev, cap))
201 return true;
Aamir Bohra2188f572017-09-22 19:07:21 +0530202
Nico Huber968ef752021-03-07 01:39:18 +0100203 if (parent &&
204 (parent->path.type != DEVICE_PATH_PCI ||
205 !_pciexp_ltr_supported(parent, parent_cap) ||
206 !_pciexp_ltr_enabled(parent, parent_cap)))
207 return false;
Aamir Bohra2188f572017-09-22 19:07:21 +0530208
Nico Huber968ef752021-03-07 01:39:18 +0100209 pci_or_config16(dev, cap + PCI_EXP_DEVCTL2, PCI_EXP_DEV2_LTR);
210 printk(BIOS_INFO, "%s: Enabled LTR\n", dev_path(dev));
211 return true;
Aamir Bohra2188f572017-09-22 19:07:21 +0530212}
213
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200214static void pciexp_enable_ltr(struct device *dev)
Aamir Bohra2188f572017-09-22 19:07:21 +0530215{
Nico Huber968ef752021-03-07 01:39:18 +0100216 const unsigned int cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
217 if (!cap)
218 return;
Aamir Bohra2188f572017-09-22 19:07:21 +0530219
Nico Huber968ef752021-03-07 01:39:18 +0100220 /*
221 * If we have get_ltr_max_latencies(), treat `dev` as the root.
222 * If not, let _pciexp_enable_ltr() query the parent's state.
223 */
224 struct device *parent = NULL;
225 unsigned int parent_cap = 0;
226 if (!dev->ops->ops_pci || !dev->ops->ops_pci->get_ltr_max_latencies) {
227 parent = dev->bus->dev;
Bill XIEa43380e2022-08-03 00:18:14 +0800228 parent_cap = pci_find_capability(parent, PCI_CAP_ID_PCIE);
Nico Huber968ef752021-03-07 01:39:18 +0100229 if (!parent_cap)
230 return;
Aamir Bohra2188f572017-09-22 19:07:21 +0530231 }
Nico Huber968ef752021-03-07 01:39:18 +0100232
233 (void)_pciexp_enable_ltr(parent, parent_cap, dev, cap);
234}
235
Tim Wawrzynczaka62cb562021-12-08 21:16:43 -0700236bool pciexp_get_ltr_max_latencies(struct device *dev, u16 *max_snoop, u16 *max_nosnoop)
Nico Huber968ef752021-03-07 01:39:18 +0100237{
238 /* Walk the hierarchy up to find get_ltr_max_latencies(). */
239 do {
240 if (dev->ops->ops_pci && dev->ops->ops_pci->get_ltr_max_latencies)
241 break;
242 if (dev->bus->dev == dev || dev->bus->dev->path.type != DEVICE_PATH_PCI)
243 return false;
244 dev = dev->bus->dev;
245 } while (true);
246
247 dev->ops->ops_pci->get_ltr_max_latencies(max_snoop, max_nosnoop);
248 return true;
249}
250
251static void pciexp_configure_ltr(struct device *parent, unsigned int parent_cap,
252 struct device *dev, unsigned int cap)
253{
254 if (!_pciexp_enable_ltr(parent, parent_cap, dev, cap))
255 return;
256
Nico Huber5ffc2c82022-08-05 12:58:18 +0200257 const unsigned int ltr_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID, 0);
Nico Huber968ef752021-03-07 01:39:18 +0100258 if (!ltr_cap)
259 return;
260
261 u16 max_snoop, max_nosnoop;
262 if (!pciexp_get_ltr_max_latencies(dev, &max_snoop, &max_nosnoop))
263 return;
264
265 pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_SNOOP, max_snoop);
266 pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_NOSNOOP, max_nosnoop);
267 printk(BIOS_INFO, "%s: Programmed LTR max latencies\n", dev_path(dev));
Kenji Chen31c6e632014-10-04 01:14:44 +0800268}
269
/*
 * Merge one endpoint's L1 Sub-State capabilities into the running
 * link-wide values in *data.
 *
 * *data is a packed word in the same layout as the L1SS Capabilities
 * register read at `endp_cap + 4`: bits 3:0 supported-substates mask,
 * bits 15:8 common-mode restore time, bits 17:16 power-on scale,
 * bits 23:19 power-on value.
 *
 * On return, *data holds the intersection of supported substates and
 * the larger (worst-case) restore time and power-on time of the two.
 * Returns 1 on success, 0 when the endpoint shares no L1 substate
 * support with the values accumulated so far (feature must be aborted).
 */
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
	unsigned int *data)
{
	/* Scale encodings 0..2 map to 2us/10us/100us; 3 is reserved (treated as 0). */
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	/* Read the endpoint's L1SS Capabilities register and unpack the same fields. */
	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	/* Only substates supported by every device on the link can be enabled. */
	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	/* Keep the larger power-on time (value * scale) of the two. */
	if (power_on_value * mult[power_on_scale] <
		endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	/* Keep the larger common-mode restore time. */
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	/* Repack the merged values into the caller's accumulator. */
	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}
304
/*
 * Program L1 Sub-States on a root port and all sibling endpoints below it.
 *
 * Starts from the root port's L1SS Capabilities (read at root_cap + 4),
 * intersects them with every endpoint via pciexp_L1_substate_cal(), and
 * aborts without touching hardware if any endpoint lacks common support.
 * Offsets 0x08/0x0c are the L1SS Control 1/2 registers per the PCIe
 * L1 PM Substates capability layout (NOTE(review): offsets assumed from
 * spec, not visible here).
 */
static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
	unsigned int root_cap, unsigned int end_cap)
{
	struct device *dev_t;
	unsigned char L1_ss_ok;
	/* Accumulator seeded from the root port's L1SS Capabilities register. */
	unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
	unsigned int L1SubStateSupport;
	unsigned int comm_mode_rst_time;
	unsigned int power_on_scale;
	unsigned int endp_power_on_value;

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		/*
		 * rp_L1_support is init'd above from root port.
		 * it needs coordination with endpoints to reach in common.
		 * if certain endpoint doesn't support L1 Sub-State, abort
		 * this feature enabling.
		 */
		L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
			&rp_L1_support);
		if (!L1_ss_ok)
			return;
	}

	/* Unpack the final link-wide values (see pciexp_L1_substate_cal layout). */
	L1SubStateSupport = rp_L1_support & 0xf;
	comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
	power_on_scale = (rp_L1_support >> 16) & 0x3;
	endp_power_on_value = (rp_L1_support >> 19) & 0x1f;

	printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
		root->path.pci.devfn >> 3);
	printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
	printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
	printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
		endp_power_on_value, power_on_scale);

	/* Common-mode restore time: bits 15:8 of Control 1 (root port). */
	pci_update_config32(root, root_cap + 0x08, ~0xff00,
		(comm_mode_rst_time << 8));

	/* Power-on value/scale: Control 2 (root port). */
	pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
		(endp_power_on_value << 3) | (power_on_scale));

	/* TODO: 0xa0, 2 are values that work on some chipsets but really
	 * should be determined dynamically by looking at downstream devices.
	 */
	pci_update_config32(root, root_cap + 0x08,
		~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
			ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
		(0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
		(2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

	/* Finally enable the common substates in Control 1 bits 4:0 (root port). */
	pci_update_config32(root, root_cap + 0x08, ~0x1f,
		L1SubStateSupport);

	/* Mirror the same programming into every endpoint on the link. */
	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
			(endp_power_on_value << 3) | (power_on_scale));

		pci_update_config32(dev_t, end_cap + 0x08,
			~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
				ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
			(0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
			(2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

		pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
			L1SubStateSupport);
	}
}
373
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200374static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
Kenji Chen31c6e632014-10-04 01:14:44 +0800375{
376 unsigned int root_cap, end_cap;
377
378 /* Do it for function 0 only */
379 if (dev->path.pci.devfn & 0x7)
380 return;
381
Nico Huber5ffc2c82022-08-05 12:58:18 +0200382 root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID, 0);
Kenji Chen31c6e632014-10-04 01:14:44 +0800383 if (!root_cap)
384 return;
385
Nico Huber5ffc2c82022-08-05 12:58:18 +0200386 end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID, 0);
Kenji Chen31c6e632014-10-04 01:14:44 +0800387 if (!end_cap) {
Nico Huberbba97352022-08-05 13:09:25 +0200388 if (dev->vendor != PCI_VID_INTEL)
389 return;
390
391 end_cap = pciexp_find_ext_vendor_cap(dev, 0xcafe, 0);
Kenji Chen31c6e632014-10-04 01:14:44 +0800392 if (!end_cap)
393 return;
394 }
395
396 pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
397}
Kenji Chen31c6e632014-10-04 01:14:44 +0800398
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700399/*
400 * Determine the ASPM L0s or L1 exit latency for a link
401 * by checking both root port and endpoint and returning
402 * the highest latency value.
403 */
Martin Roth38ddbfb2019-10-23 21:41:00 -0600404static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
405 struct device *endp, unsigned int endp_cap,
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700406 enum aspm_type type)
407{
408 int root_lat = 0, endp_lat = 0;
409 u32 root_lnkcap, endp_lnkcap;
410
411 root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
412 endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
413
414 /* Make sure the link supports this ASPM type by checking
415 * capability bits 11:10 with aspm_type offset by 1 */
416 if (!(root_lnkcap & (1 << (type + 9))) ||
417 !(endp_lnkcap & (1 << (type + 9))))
418 return -1;
419
420 /* Find the one with higher latency */
421 switch (type) {
422 case PCIE_ASPM_L0S:
423 root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
424 endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
425 break;
426 case PCIE_ASPM_L1:
427 root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
428 endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
429 break;
430 default:
431 return -1;
432 }
433
434 return (endp_lat > root_lat) ? endp_lat : root_lat;
435}
436
437/*
438 * Enable ASPM on PCIe root port and endpoint.
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700439 */
Martin Roth38ddbfb2019-10-23 21:41:00 -0600440static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
441 struct device *endp, unsigned int endp_cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700442{
443 const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
444 enum aspm_type apmc = PCIE_ASPM_NONE;
445 int exit_latency, ok_latency;
446 u16 lnkctl;
447 u32 devcap;
448
Nico Huber570b1832017-08-30 13:38:50 +0200449 if (endp->disable_pcie_aspm)
450 return;
451
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700452 /* Get endpoint device capabilities for acceptable limits */
453 devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
454
455 /* Enable L0s if it is within endpoint acceptable limit */
456 ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
457 exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
458 PCIE_ASPM_L0S);
459 if (exit_latency >= 0 && exit_latency <= ok_latency)
460 apmc |= PCIE_ASPM_L0S;
461
462 /* Enable L1 if it is within endpoint acceptable limit */
463 ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
464 exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
465 PCIE_ASPM_L1);
466 if (exit_latency >= 0 && exit_latency <= ok_latency)
467 apmc |= PCIE_ASPM_L1;
468
469 if (apmc != PCIE_ASPM_NONE) {
470 /* Set APMC in root port first */
471 lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
472 lnkctl |= apmc;
473 pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);
474
475 /* Set APMC in endpoint device next */
476 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
477 lnkctl |= apmc;
478 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
479 }
480
481 printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700482}
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700483
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +0200484/*
485 * Set max payload size of endpoint in accordance with max payload size of root port.
486 */
487static void pciexp_set_max_payload_size(struct device *root, unsigned int root_cap,
488 struct device *endp, unsigned int endp_cap)
489{
490 unsigned int endp_max_payload, root_max_payload, max_payload;
491 u16 endp_devctl, root_devctl;
492 u32 endp_devcap, root_devcap;
493
494 /* Get max payload size supported by endpoint */
495 endp_devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
496 endp_max_payload = endp_devcap & PCI_EXP_DEVCAP_PAYLOAD;
497
498 /* Get max payload size supported by root port */
499 root_devcap = pci_read_config32(root, root_cap + PCI_EXP_DEVCAP);
500 root_max_payload = root_devcap & PCI_EXP_DEVCAP_PAYLOAD;
501
502 /* Set max payload to smaller of the reported device capability. */
503 max_payload = MIN(endp_max_payload, root_max_payload);
504 if (max_payload > 5) {
505 /* Values 6 and 7 are reserved in PCIe 3.0 specs. */
506 printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
507 max_payload);
508 max_payload = 5;
509 }
510
511 endp_devctl = pci_read_config16(endp, endp_cap + PCI_EXP_DEVCTL);
512 endp_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
513 endp_devctl |= max_payload << 5;
514 pci_write_config16(endp, endp_cap + PCI_EXP_DEVCTL, endp_devctl);
515
516 root_devctl = pci_read_config16(root, root_cap + PCI_EXP_DEVCTL);
517 root_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
518 root_devctl |= max_payload << 5;
519 pci_write_config16(root, root_cap + PCI_EXP_DEVCTL, root_devctl);
520
521 printk(BIOS_INFO, "PCIe: Max_Payload_Size adjusted to %d\n", (1 << (max_payload + 7)));
522}
523
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200524static void pciexp_tune_dev(struct device *dev)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000525{
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200526 struct device *root = dev->bus->dev;
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700527 unsigned int root_cap, cap;
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000528
529 cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
Uwe Hermannd453dd02010-10-18 00:00:57 +0000530 if (!cap)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000531 return;
Uwe Hermannd453dd02010-10-18 00:00:57 +0000532
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700533 root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
534 if (!root_cap)
535 return;
Stefan Reinauerf6eb88a2010-01-17 13:54:08 +0000536
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700537 /* Check for and enable Common Clock */
Julius Wernercd49cce2019-03-05 16:53:33 -0800538 if (CONFIG(PCIEXP_COMMON_CLOCK))
Kyösti Mälkki91bfa8e2016-11-20 20:39:56 +0200539 pciexp_enable_common_clock(root, root_cap, dev, cap);
Uwe Hermanne4870472010-11-04 23:23:47 +0000540
Kane Chen18cb1342014-10-01 11:13:54 +0800541 /* Check if per port CLK req is supported by endpoint*/
Julius Wernercd49cce2019-03-05 16:53:33 -0800542 if (CONFIG(PCIEXP_CLK_PM))
Kyösti Mälkki91bfa8e2016-11-20 20:39:56 +0200543 pciexp_enable_clock_power_pm(dev, cap);
Kane Chen18cb1342014-10-01 11:13:54 +0800544
Kenji Chen31c6e632014-10-04 01:14:44 +0800545 /* Enable L1 Sub-State when both root port and endpoint support */
Julius Wernercd49cce2019-03-05 16:53:33 -0800546 if (CONFIG(PCIEXP_L1_SUB_STATE))
Kyösti Mälkki91bfa8e2016-11-20 20:39:56 +0200547 pciexp_config_L1_sub_state(root, dev);
Kenji Chen31c6e632014-10-04 01:14:44 +0800548
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700549 /* Check for and enable ASPM */
Julius Wernercd49cce2019-03-05 16:53:33 -0800550 if (CONFIG(PCIEXP_ASPM))
Kyösti Mälkki91bfa8e2016-11-20 20:39:56 +0200551 pciexp_enable_aspm(root, root_cap, dev, cap);
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +0200552
553 /* Adjust Max_Payload_Size of link ends. */
554 pciexp_set_max_payload_size(root, root_cap, dev, cap);
Nico Huber968ef752021-03-07 01:39:18 +0100555
556 pciexp_configure_ltr(root, root_cap, dev, cap);
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000557}
558
Kyösti Mälkkide271a82015-03-18 13:09:47 +0200559void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
560 unsigned int max_devfn)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000561{
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200562 struct device *child;
Nico Huber968ef752021-03-07 01:39:18 +0100563
564 pciexp_enable_ltr(bus->dev);
565
Kyösti Mälkkide271a82015-03-18 13:09:47 +0200566 pci_scan_bus(bus, min_devfn, max_devfn);
Uwe Hermannd453dd02010-10-18 00:00:57 +0000567
568 for (child = bus->children; child; child = child->sibling) {
Duncan Lauriebf696222020-10-18 15:10:00 -0700569 if (child->path.type != DEVICE_PATH_PCI)
570 continue;
Uwe Hermannd453dd02010-10-18 00:00:57 +0000571 if ((child->path.pci.devfn < min_devfn) ||
572 (child->path.pci.devfn > max_devfn)) {
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000573 continue;
574 }
575 pciexp_tune_dev(child);
576 }
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000577}
578
/* Scan the bus behind a PCIe bridge, using pciexp_scan_bus() for the children. */
void pciexp_scan_bridge(struct device *dev)
{
	do_pci_scan_bridge(dev, pciexp_scan_bus);
}
583
/** Default device operations for PCI Express bridges */
static struct pci_operations pciexp_bus_ops_pci = {
	.set_subsystem = 0,	/* no subsystem ID programming for bridges */
};

/* Default device_operations bound to PCIe bridge devices. */
struct device_operations default_pciexp_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600597
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600598static void pciexp_hotplug_dummy_read_resources(struct device *dev)
599{
600 struct resource *resource;
601
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700602 /* Add extra memory space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600603 resource = new_resource(dev, 0x10);
604 resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
605 resource->align = 12;
606 resource->gran = 12;
607 resource->limit = 0xffffffff;
608 resource->flags |= IORESOURCE_MEM;
609
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700610 /* Add extra prefetchable memory space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600611 resource = new_resource(dev, 0x14);
612 resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
613 resource->align = 12;
614 resource->gran = 12;
615 resource->limit = 0xffffffffffffffff;
616 resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
617
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700618 /* Set resource flag requesting allocation above 4G boundary. */
619 if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
620 resource->flags |= IORESOURCE_ABOVE_4G;
621
622 /* Add extra I/O space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600623 resource = new_resource(dev, 0x18);
624 resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
625 resource->align = 12;
626 resource->gran = 12;
627 resource->limit = 0xffff;
628 resource->flags |= IORESOURCE_IO;
629}
630
/* Ops for the resource-reserving dummy device; it needs no real programming. */
static struct device_operations pciexp_hotplug_dummy_ops = {
	.read_resources = pciexp_hotplug_dummy_read_resources,
	.set_resources = noop_set_resources,
};
635
/* Scan a hotplug-capable PCIe bridge, reserving buses and window space. */
void pciexp_hotplug_scan_bridge(struct device *dev)
{
	/* Reserve extra downstream bus numbers for devices plugged in later. */
	dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;

	/* Normal PCIe Scan */
	pciexp_scan_bridge(dev);

	/* Add dummy slot to preserve resources, must happen after bus scan */
	struct device *dummy;
	struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
	dummy = alloc_dev(dev->link_list, &dummy_path);
	dummy->ops = &pciexp_hotplug_dummy_ops;
}
649
/* Default device_operations for hotplug-capable PCIe bridges. */
struct device_operations default_pciexp_hotplug_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_hotplug_scan_bridge,	/* adds the dummy resource device */
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};