blob: bc4012503af4fccf04cecf7eed5d513eba755933 [file] [log] [blame]
Angel Ponsc74dae92020-04-02 23:48:16 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00002
3#include <console/console.h>
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +02004#include <commonlib/helpers.h>
Duncan Laurie90dcdd42011-10-25 14:15:11 -07005#include <delay.h>
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00006#include <device/device.h>
7#include <device/pci.h>
Patrick Rudolphe56189c2018-04-18 10:11:59 +02008#include <device/pci_ops.h>
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00009#include <device/pciexp.h>
10
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060011static unsigned int pciexp_get_ext_cap_offset(const struct device *dev, unsigned int cap,
12 unsigned int offset)
Kenji Chen31c6e632014-10-04 01:14:44 +080013{
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060014 unsigned int this_cap_offset = offset;
15 unsigned int next_cap_offset, this_cap, cafe;
Nico Huber4b864e52022-08-05 12:44:11 +020016 while (this_cap_offset != 0) {
Kyösti Mälkki91bfa8e2016-11-20 20:39:56 +020017 this_cap = pci_read_config32(dev, this_cap_offset);
Bill XIE385e4322022-08-04 21:52:05 +080018 /* Bail out when this request is unsupported */
19 if (this_cap == 0xffffffff)
20 break;
Kyösti Mälkki91bfa8e2016-11-20 20:39:56 +020021 cafe = pci_read_config32(dev, this_cap_offset + 4);
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060022 if ((this_cap & 0xffff) == cap) {
Kenji Chen31c6e632014-10-04 01:14:44 +080023 return this_cap_offset;
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060024 } else if ((cafe & 0xffff) == cap) {
Kenji Chen31c6e632014-10-04 01:14:44 +080025 return this_cap_offset + 4;
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060026 } else {
27 next_cap_offset = this_cap >> 20;
Kenji Chen31c6e632014-10-04 01:14:44 +080028 this_cap_offset = next_cap_offset;
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060029 }
Nico Huber4b864e52022-08-05 12:44:11 +020030 }
Kenji Chen31c6e632014-10-04 01:14:44 +080031
32 return 0;
33}
Kenji Chen31c6e632014-10-04 01:14:44 +080034
Nico Huber5ffc2c82022-08-05 12:58:18 +020035/*
36 * Search for an extended capability with the ID `cap`.
37 *
38 * Returns the offset of the first matching extended
39 * capability if found, or 0 otherwise.
40 *
41 * A new search is started with `offset == 0`.
42 * To continue a search, the prior return value
43 * should be passed as `offset`.
44 */
45unsigned int pciexp_find_extended_cap(const struct device *dev, unsigned int cap,
46 unsigned int offset)
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060047{
Nico Huber5ffc2c82022-08-05 12:58:18 +020048 unsigned int next_cap_offset;
49
50 if (offset)
51 next_cap_offset = pci_read_config32(dev, offset) >> 20;
52 else
53 next_cap_offset = PCIE_EXT_CAP_OFFSET;
54
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060055 return pciexp_get_ext_cap_offset(dev, cap, next_cap_offset);
56}
57
Nico Huber9099fea2022-08-05 13:02:52 +020058/*
59 * Search for a vendor-specific extended capability,
60 * with the vendor-specific ID `cap`.
61 *
62 * Returns the offset of the vendor-specific header,
63 * i.e. the offset of the extended capability + 4,
64 * or 0 if none is found.
65 *
66 * A new search is started with `offset == 0`.
67 * To continue a search, the prior return value
68 * should be passed as `offset`.
69 */
70unsigned int pciexp_find_ext_vendor_cap(const struct device *dev, unsigned int cap,
71 unsigned int offset)
72{
73 /* Reconstruct capability offset from vendor-specific header offset. */
74 if (offset >= 4)
75 offset -= 4;
76
77 for (;;) {
78 offset = pciexp_find_extended_cap(dev, PCI_EXT_CAP_ID_VNDR, offset);
79 if (!offset)
80 return 0;
81
82 const unsigned int vndr_cap = pci_read_config32(dev, offset + 4);
83 if ((vndr_cap & 0xffff) == cap)
84 return offset + 4;
85 }
86}
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060087
Duncan Laurie90dcdd42011-10-25 14:15:11 -070088/*
89 * Re-train a PCIe link
90 */
91#define PCIE_TRAIN_RETRY 10000
Martin Roth38ddbfb2019-10-23 21:41:00 -060092static int pciexp_retrain_link(struct device *dev, unsigned int cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -070093{
Youness Alaouibb5fb642017-05-03 17:57:13 -040094 unsigned int try;
Duncan Laurie90dcdd42011-10-25 14:15:11 -070095 u16 lnk;
96
Youness Alaouibb5fb642017-05-03 17:57:13 -040097 /*
98 * Implementation note (page 633) in PCIe Specification 3.0 suggests
99 * polling the Link Training bit in the Link Status register until the
100 * value returned is 0 before setting the Retrain Link bit to 1.
101 * This is meant to avoid a race condition when using the
102 * Retrain Link mechanism.
103 */
104 for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
105 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
106 if (!(lnk & PCI_EXP_LNKSTA_LT))
107 break;
108 udelay(100);
109 }
110 if (try == 0) {
111 printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
112 return -1;
113 }
114
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700115 /* Start link retraining */
116 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
117 lnk |= PCI_EXP_LNKCTL_RL;
118 pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);
119
120 /* Wait for training to complete */
Youness Alaouibb5fb642017-05-03 17:57:13 -0400121 for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700122 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
123 if (!(lnk & PCI_EXP_LNKSTA_LT))
124 return 0;
125 udelay(100);
126 }
127
128 printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
129 return -1;
130}
131
132/*
133 * Check the Slot Clock Configuration for root port and endpoint
134 * and enable Common Clock Configuration if possible. If CCC is
135 * enabled the link must be retrained.
136 */
Martin Roth38ddbfb2019-10-23 21:41:00 -0600137static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
138 struct device *endp, unsigned int endp_cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700139{
140 u16 root_scc, endp_scc, lnkctl;
141
142 /* Get Slot Clock Configuration for root port */
143 root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
144 root_scc &= PCI_EXP_LNKSTA_SLC;
145
146 /* Get Slot Clock Configuration for endpoint */
147 endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
148 endp_scc &= PCI_EXP_LNKSTA_SLC;
149
150 /* Enable Common Clock Configuration and retrain */
151 if (root_scc && endp_scc) {
152 printk(BIOS_INFO, "Enabling Common Clock Configuration\n");
153
154 /* Set in endpoint */
155 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
156 lnkctl |= PCI_EXP_LNKCTL_CCC;
157 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
158
159 /* Set in root port */
160 lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
161 lnkctl |= PCI_EXP_LNKCTL_CCC;
162 pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);
163
164 /* Retrain link if CCC was enabled */
165 pciexp_retrain_link(root, root_cap);
166 }
167}
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700168
Martin Roth38ddbfb2019-10-23 21:41:00 -0600169static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
Kane Chen18cb1342014-10-01 11:13:54 +0800170{
171 /* check if per port clk req is supported in device */
172 u32 endp_ca;
173 u16 lnkctl;
174 endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
175 if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
Arthur Heymans330c46b2017-07-12 19:17:56 +0200176 printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
Kane Chen18cb1342014-10-01 11:13:54 +0800177 return;
178 }
179 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
180 lnkctl = lnkctl | PCI_EXP_EN_CLK_PM;
181 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
182}
Kane Chen18cb1342014-10-01 11:13:54 +0800183
Nico Huber968ef752021-03-07 01:39:18 +0100184static bool _pciexp_ltr_supported(struct device *dev, unsigned int cap)
Kenji Chen31c6e632014-10-04 01:14:44 +0800185{
Nico Huber968ef752021-03-07 01:39:18 +0100186 return pci_read_config16(dev, cap + PCI_EXP_DEVCAP2) & PCI_EXP_DEVCAP2_LTR;
Kenji Chen31c6e632014-10-04 01:14:44 +0800187}
188
Nico Huber968ef752021-03-07 01:39:18 +0100189static bool _pciexp_ltr_enabled(struct device *dev, unsigned int cap)
Aamir Bohra2188f572017-09-22 19:07:21 +0530190{
Nico Huber968ef752021-03-07 01:39:18 +0100191 return pci_read_config16(dev, cap + PCI_EXP_DEVCTL2) & PCI_EXP_DEV2_LTR;
Aamir Bohra2188f572017-09-22 19:07:21 +0530192}
193
Nico Huber968ef752021-03-07 01:39:18 +0100194static bool _pciexp_enable_ltr(struct device *parent, unsigned int parent_cap,
195 struct device *dev, unsigned int cap)
Kenji Chen31c6e632014-10-04 01:14:44 +0800196{
Nico Huber968ef752021-03-07 01:39:18 +0100197 if (!_pciexp_ltr_supported(dev, cap)) {
198 printk(BIOS_DEBUG, "%s: No LTR support\n", dev_path(dev));
199 return false;
Pratik Prajapati0cd0d282015-06-09 12:06:20 -0700200 }
Aamir Bohra2188f572017-09-22 19:07:21 +0530201
Nico Huber968ef752021-03-07 01:39:18 +0100202 if (_pciexp_ltr_enabled(dev, cap))
203 return true;
Aamir Bohra2188f572017-09-22 19:07:21 +0530204
Nico Huber968ef752021-03-07 01:39:18 +0100205 if (parent &&
206 (parent->path.type != DEVICE_PATH_PCI ||
207 !_pciexp_ltr_supported(parent, parent_cap) ||
208 !_pciexp_ltr_enabled(parent, parent_cap)))
209 return false;
Aamir Bohra2188f572017-09-22 19:07:21 +0530210
Nico Huber968ef752021-03-07 01:39:18 +0100211 pci_or_config16(dev, cap + PCI_EXP_DEVCTL2, PCI_EXP_DEV2_LTR);
212 printk(BIOS_INFO, "%s: Enabled LTR\n", dev_path(dev));
213 return true;
Aamir Bohra2188f572017-09-22 19:07:21 +0530214}
215
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200216static void pciexp_enable_ltr(struct device *dev)
Aamir Bohra2188f572017-09-22 19:07:21 +0530217{
Nico Huber968ef752021-03-07 01:39:18 +0100218 const unsigned int cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
219 if (!cap)
220 return;
Aamir Bohra2188f572017-09-22 19:07:21 +0530221
Nico Huber968ef752021-03-07 01:39:18 +0100222 /*
223 * If we have get_ltr_max_latencies(), treat `dev` as the root.
224 * If not, let _pciexp_enable_ltr() query the parent's state.
225 */
226 struct device *parent = NULL;
227 unsigned int parent_cap = 0;
228 if (!dev->ops->ops_pci || !dev->ops->ops_pci->get_ltr_max_latencies) {
229 parent = dev->bus->dev;
Bill XIEa43380e2022-08-03 00:18:14 +0800230 parent_cap = pci_find_capability(parent, PCI_CAP_ID_PCIE);
Nico Huber968ef752021-03-07 01:39:18 +0100231 if (!parent_cap)
232 return;
Aamir Bohra2188f572017-09-22 19:07:21 +0530233 }
Nico Huber968ef752021-03-07 01:39:18 +0100234
235 (void)_pciexp_enable_ltr(parent, parent_cap, dev, cap);
236}
237
Tim Wawrzynczaka62cb562021-12-08 21:16:43 -0700238bool pciexp_get_ltr_max_latencies(struct device *dev, u16 *max_snoop, u16 *max_nosnoop)
Nico Huber968ef752021-03-07 01:39:18 +0100239{
240 /* Walk the hierarchy up to find get_ltr_max_latencies(). */
241 do {
242 if (dev->ops->ops_pci && dev->ops->ops_pci->get_ltr_max_latencies)
243 break;
244 if (dev->bus->dev == dev || dev->bus->dev->path.type != DEVICE_PATH_PCI)
245 return false;
246 dev = dev->bus->dev;
247 } while (true);
248
249 dev->ops->ops_pci->get_ltr_max_latencies(max_snoop, max_nosnoop);
250 return true;
251}
252
253static void pciexp_configure_ltr(struct device *parent, unsigned int parent_cap,
254 struct device *dev, unsigned int cap)
255{
256 if (!_pciexp_enable_ltr(parent, parent_cap, dev, cap))
257 return;
258
Nico Huber5ffc2c82022-08-05 12:58:18 +0200259 const unsigned int ltr_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID, 0);
Nico Huber968ef752021-03-07 01:39:18 +0100260 if (!ltr_cap)
261 return;
262
263 u16 max_snoop, max_nosnoop;
264 if (!pciexp_get_ltr_max_latencies(dev, &max_snoop, &max_nosnoop))
265 return;
266
267 pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_SNOOP, max_snoop);
268 pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_NOSNOOP, max_nosnoop);
269 printk(BIOS_INFO, "%s: Programmed LTR max latencies\n", dev_path(dev));
Kenji Chen31c6e632014-10-04 01:14:44 +0800270}
271
/*
 * Intersect the L1 Sub-State parameters accumulated in `*data` with the
 * capabilities advertised by endpoint `dev` (L1SS capability at `endp_cap`).
 *
 * `*data` packs: bits 3:0 supported sub-states, bits 15:8
 * Common_Mode_Restore_Time, bits 17:16 T_POWER_ON scale,
 * bits 23:19 T_POWER_ON value.  On success, `*data` is rewritten with the
 * common subset of sub-states and the worst-case (largest) timings.
 *
 * Returns 1 when a common set of L1 Sub-States exists, 0 otherwise.
 */
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
				unsigned int *data)
{
	/* T_POWER_ON scale multipliers; encoding 3 is reserved (maps to 0). */
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	/* Endpoint's L1SS capabilities register (capability offset + 4). */
	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	/* Only sub-states supported by both ends can be enabled. */
	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	/* Keep the larger effective T_POWER_ON (value scaled by its multiplier). */
	if (power_on_value * mult[power_on_scale] <
		endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	/* Keep the larger Common_Mode_Restore_Time. */
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	/* Repack the negotiated parameters into the caller's accumulator. */
	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}
306
/*
 * Negotiate L1 Sub-State parameters across the root port and every sibling
 * endpoint function behind it, then program both ends.
 *
 * `root_cap`/`end_cap` are the L1SS extended capability offsets of the root
 * port and the endpoints.  Register layout used below: +0x04 capabilities,
 * +0x08 control 1, +0x0c control 2.
 */
static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
				unsigned int root_cap, unsigned int end_cap)
{
	struct device *dev_t;
	unsigned char L1_ss_ok;
	/* Seed the accumulator with the root port's advertised L1SS capabilities. */
	unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
	unsigned int L1SubStateSupport;
	unsigned int comm_mode_rst_time;
	unsigned int power_on_scale;
	unsigned int endp_power_on_value;

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		/*
		 * rp_L1_support is init'd above from root port.
		 * it needs coordination with endpoints to reach in common.
		 * if certain endpoint doesn't support L1 Sub-State, abort
		 * this feature enabling.
		 */
		L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
						&rp_L1_support);
		if (!L1_ss_ok)
			return;
	}

	/* Unpack the negotiated parameters (packing defined in pciexp_L1_substate_cal). */
	L1SubStateSupport = rp_L1_support & 0xf;
	comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
	power_on_scale = (rp_L1_support >> 16) & 0x3;
	endp_power_on_value = (rp_L1_support >> 19) & 0x1f;

	printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
		root->path.pci.devfn >> 3);
	printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
	printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
	printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
		endp_power_on_value, power_on_scale);

	/* Program Common_Mode_Restore_Time (control 1, bits 15:8) in the root. */
	pci_update_config32(root, root_cap + 0x08, ~0xff00,
			(comm_mode_rst_time << 8));

	/* Program T_POWER_ON value/scale (control 2) in the root. */
	pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
			(endp_power_on_value << 3) | (power_on_scale));

	/* TODO: 0xa0, 2 are values that work on some chipsets but really
	 * should be determined dynamically by looking at downstream devices.
	 */
	pci_update_config32(root, root_cap + 0x08,
			~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
				ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
			(0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
			(2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

	/* Finally enable the common sub-states (control 1, bits 4:0) in the root. */
	pci_update_config32(root, root_cap + 0x08, ~0x1f,
			L1SubStateSupport);

	/* Mirror the same settings into every endpoint function. */
	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
				(endp_power_on_value << 3) | (power_on_scale));

		pci_update_config32(dev_t, end_cap + 0x08,
				~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
					ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
				(0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
				(2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

		pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
				L1SubStateSupport);
	}
}
375
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200376static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
Kenji Chen31c6e632014-10-04 01:14:44 +0800377{
378 unsigned int root_cap, end_cap;
379
380 /* Do it for function 0 only */
381 if (dev->path.pci.devfn & 0x7)
382 return;
383
Nico Huber5ffc2c82022-08-05 12:58:18 +0200384 root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID, 0);
Kenji Chen31c6e632014-10-04 01:14:44 +0800385 if (!root_cap)
386 return;
387
Nico Huber5ffc2c82022-08-05 12:58:18 +0200388 end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID, 0);
Kenji Chen31c6e632014-10-04 01:14:44 +0800389 if (!end_cap) {
Nico Huber5ffc2c82022-08-05 12:58:18 +0200390 end_cap = pciexp_find_extended_cap(dev, 0xcafe, 0);
Kenji Chen31c6e632014-10-04 01:14:44 +0800391 if (!end_cap)
392 return;
393 }
394
395 pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
396}
Kenji Chen31c6e632014-10-04 01:14:44 +0800397
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700398/*
399 * Determine the ASPM L0s or L1 exit latency for a link
400 * by checking both root port and endpoint and returning
401 * the highest latency value.
402 */
Martin Roth38ddbfb2019-10-23 21:41:00 -0600403static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
404 struct device *endp, unsigned int endp_cap,
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700405 enum aspm_type type)
406{
407 int root_lat = 0, endp_lat = 0;
408 u32 root_lnkcap, endp_lnkcap;
409
410 root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
411 endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
412
413 /* Make sure the link supports this ASPM type by checking
414 * capability bits 11:10 with aspm_type offset by 1 */
415 if (!(root_lnkcap & (1 << (type + 9))) ||
416 !(endp_lnkcap & (1 << (type + 9))))
417 return -1;
418
419 /* Find the one with higher latency */
420 switch (type) {
421 case PCIE_ASPM_L0S:
422 root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
423 endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
424 break;
425 case PCIE_ASPM_L1:
426 root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
427 endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
428 break;
429 default:
430 return -1;
431 }
432
433 return (endp_lat > root_lat) ? endp_lat : root_lat;
434}
435
436/*
437 * Enable ASPM on PCIe root port and endpoint.
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700438 */
Martin Roth38ddbfb2019-10-23 21:41:00 -0600439static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
440 struct device *endp, unsigned int endp_cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700441{
442 const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
443 enum aspm_type apmc = PCIE_ASPM_NONE;
444 int exit_latency, ok_latency;
445 u16 lnkctl;
446 u32 devcap;
447
Nico Huber570b1832017-08-30 13:38:50 +0200448 if (endp->disable_pcie_aspm)
449 return;
450
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700451 /* Get endpoint device capabilities for acceptable limits */
452 devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
453
454 /* Enable L0s if it is within endpoint acceptable limit */
455 ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
456 exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
457 PCIE_ASPM_L0S);
458 if (exit_latency >= 0 && exit_latency <= ok_latency)
459 apmc |= PCIE_ASPM_L0S;
460
461 /* Enable L1 if it is within endpoint acceptable limit */
462 ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
463 exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
464 PCIE_ASPM_L1);
465 if (exit_latency >= 0 && exit_latency <= ok_latency)
466 apmc |= PCIE_ASPM_L1;
467
468 if (apmc != PCIE_ASPM_NONE) {
469 /* Set APMC in root port first */
470 lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
471 lnkctl |= apmc;
472 pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);
473
474 /* Set APMC in endpoint device next */
475 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
476 lnkctl |= apmc;
477 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
478 }
479
480 printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700481}
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700482
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +0200483/*
484 * Set max payload size of endpoint in accordance with max payload size of root port.
485 */
486static void pciexp_set_max_payload_size(struct device *root, unsigned int root_cap,
487 struct device *endp, unsigned int endp_cap)
488{
489 unsigned int endp_max_payload, root_max_payload, max_payload;
490 u16 endp_devctl, root_devctl;
491 u32 endp_devcap, root_devcap;
492
493 /* Get max payload size supported by endpoint */
494 endp_devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
495 endp_max_payload = endp_devcap & PCI_EXP_DEVCAP_PAYLOAD;
496
497 /* Get max payload size supported by root port */
498 root_devcap = pci_read_config32(root, root_cap + PCI_EXP_DEVCAP);
499 root_max_payload = root_devcap & PCI_EXP_DEVCAP_PAYLOAD;
500
501 /* Set max payload to smaller of the reported device capability. */
502 max_payload = MIN(endp_max_payload, root_max_payload);
503 if (max_payload > 5) {
504 /* Values 6 and 7 are reserved in PCIe 3.0 specs. */
505 printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
506 max_payload);
507 max_payload = 5;
508 }
509
510 endp_devctl = pci_read_config16(endp, endp_cap + PCI_EXP_DEVCTL);
511 endp_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
512 endp_devctl |= max_payload << 5;
513 pci_write_config16(endp, endp_cap + PCI_EXP_DEVCTL, endp_devctl);
514
515 root_devctl = pci_read_config16(root, root_cap + PCI_EXP_DEVCTL);
516 root_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
517 root_devctl |= max_payload << 5;
518 pci_write_config16(root, root_cap + PCI_EXP_DEVCTL, root_devctl);
519
520 printk(BIOS_INFO, "PCIe: Max_Payload_Size adjusted to %d\n", (1 << (max_payload + 7)));
521}
522
/*
 * Apply all configured PCIe link tunings to `dev` and the upstream
 * port (`dev`'s parent bridge) on the other end of its link.
 */
static void pciexp_tune_dev(struct device *dev)
{
	struct device *root = dev->bus->dev;
	unsigned int root_cap, cap;

	/* Both ends of the link must expose a PCIe capability. */
	cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
	if (!root_cap)
		return;

	/* Check for and enable Common Clock */
	if (CONFIG(PCIEXP_COMMON_CLOCK))
		pciexp_enable_common_clock(root, root_cap, dev, cap);

	/* Check if per port CLK req is supported by endpoint */
	if (CONFIG(PCIEXP_CLK_PM))
		pciexp_enable_clock_power_pm(dev, cap);

	/* Enable L1 Sub-State when both root port and endpoint support */
	if (CONFIG(PCIEXP_L1_SUB_STATE))
		pciexp_config_L1_sub_state(root, dev);

	/* Check for and enable ASPM */
	if (CONFIG(PCIEXP_ASPM))
		pciexp_enable_aspm(root, root_cap, dev, cap);

	/* Adjust Max_Payload_Size of link ends. */
	pciexp_set_max_payload_size(root, root_cap, dev, cap);

	/* LTR is configured unconditionally; gating happens inside. */
	pciexp_configure_ltr(root, root_cap, dev, cap);
}
557
Kyösti Mälkkide271a82015-03-18 13:09:47 +0200558void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
559 unsigned int max_devfn)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000560{
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200561 struct device *child;
Nico Huber968ef752021-03-07 01:39:18 +0100562
563 pciexp_enable_ltr(bus->dev);
564
Kyösti Mälkkide271a82015-03-18 13:09:47 +0200565 pci_scan_bus(bus, min_devfn, max_devfn);
Uwe Hermannd453dd02010-10-18 00:00:57 +0000566
567 for (child = bus->children; child; child = child->sibling) {
Duncan Lauriebf696222020-10-18 15:10:00 -0700568 if (child->path.type != DEVICE_PATH_PCI)
569 continue;
Uwe Hermannd453dd02010-10-18 00:00:57 +0000570 if ((child->path.pci.devfn < min_devfn) ||
571 (child->path.pci.devfn > max_devfn)) {
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000572 continue;
573 }
574 pciexp_tune_dev(child);
575 }
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000576}
577
/* Scan the bridge's secondary bus using the PCIe-aware pciexp_scan_bus(). */
void pciexp_scan_bridge(struct device *dev)
{
	do_pci_scan_bridge(dev, pciexp_scan_bus);
}
582
/** Default device operations for PCI Express bridges */
static struct pci_operations pciexp_bus_ops_pci = {
	.set_subsystem = 0,	/* bridges get no subsystem ID programming */
};

struct device_operations default_pciexp_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_scan_bridge,	/* PCIe-aware downstream scan */
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600596
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600597static void pciexp_hotplug_dummy_read_resources(struct device *dev)
598{
599 struct resource *resource;
600
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700601 /* Add extra memory space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600602 resource = new_resource(dev, 0x10);
603 resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
604 resource->align = 12;
605 resource->gran = 12;
606 resource->limit = 0xffffffff;
607 resource->flags |= IORESOURCE_MEM;
608
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700609 /* Add extra prefetchable memory space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600610 resource = new_resource(dev, 0x14);
611 resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
612 resource->align = 12;
613 resource->gran = 12;
614 resource->limit = 0xffffffffffffffff;
615 resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
616
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700617 /* Set resource flag requesting allocation above 4G boundary. */
618 if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
619 resource->flags |= IORESOURCE_ABOVE_4G;
620
621 /* Add extra I/O space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600622 resource = new_resource(dev, 0x18);
623 resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
624 resource->align = 12;
625 resource->gran = 12;
626 resource->limit = 0xffff;
627 resource->flags |= IORESOURCE_IO;
628}
629
/* The dummy device only reserves resources; setting them is a no-op. */
static struct device_operations pciexp_hotplug_dummy_ops = {
	.read_resources = pciexp_hotplug_dummy_read_resources,
	.set_resources = noop_set_resources,
};
634
635void pciexp_hotplug_scan_bridge(struct device *dev)
636{
637 dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;
638
639 /* Normal PCIe Scan */
640 pciexp_scan_bridge(dev);
641
642 /* Add dummy slot to preserve resources, must happen after bus scan */
643 struct device *dummy;
644 struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
645 dummy = alloc_dev(dev->link_list, &dummy_path);
646 dummy->ops = &pciexp_hotplug_dummy_ops;
647}
648
/* Default device operations for hotplug-capable PCI Express bridges. */
struct device_operations default_pciexp_hotplug_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_hotplug_scan_bridge,	/* scan + hotplug resource reservation */
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};