blob: c8ac391c0687c18d03cffe56ab6cc8bbe2130f75 [file] [log] [blame]
Angel Ponsc74dae92020-04-02 23:48:16 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00002
3#include <console/console.h>
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +02004#include <commonlib/helpers.h>
Duncan Laurie90dcdd42011-10-25 14:15:11 -07005#include <delay.h>
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00006#include <device/device.h>
7#include <device/pci.h>
Nico Huberbba97352022-08-05 13:09:25 +02008#include <device/pci_ids.h>
Patrick Rudolphe56189c2018-04-18 10:11:59 +02009#include <device/pci_ops.h>
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000010#include <device/pciexp.h>
11
/* Extract the 16-bit capability ID from an extended capability header dword. */
static unsigned int ext_cap_id(unsigned int cap)
{
	return cap & 0xffffu;
}
16
/*
 * Extract the next-capability pointer from an extended capability header.
 * The pointer occupies bits 31:20; the low two bits are masked off to
 * keep the offset dword-aligned.
 */
static unsigned int ext_cap_next_offset(unsigned int cap)
{
	return (cap >> 20) & 0xffc;
}
21
22static unsigned int find_ext_cap_offset(const struct device *dev, unsigned int cap_id,
23 unsigned int offset)
Kenji Chen31c6e632014-10-04 01:14:44 +080024{
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060025 unsigned int this_cap_offset = offset;
Nico Huber077dc2e2022-08-05 14:47:35 +020026
Nico Huber5f7cfb32022-08-05 14:50:06 +020027 while (this_cap_offset >= PCIE_EXT_CAP_OFFSET) {
Nico Huber077dc2e2022-08-05 14:47:35 +020028 const unsigned int this_cap = pci_read_config32(dev, this_cap_offset);
29
Bill XIE385e4322022-08-04 21:52:05 +080030 /* Bail out when this request is unsupported */
31 if (this_cap == 0xffffffff)
32 break;
Nico Huber077dc2e2022-08-05 14:47:35 +020033
34 if (ext_cap_id(this_cap) == cap_id)
Kenji Chen31c6e632014-10-04 01:14:44 +080035 return this_cap_offset;
Nico Huber077dc2e2022-08-05 14:47:35 +020036
37 this_cap_offset = ext_cap_next_offset(this_cap);
Nico Huber4b864e52022-08-05 12:44:11 +020038 }
Kenji Chen31c6e632014-10-04 01:14:44 +080039
40 return 0;
41}
Kenji Chen31c6e632014-10-04 01:14:44 +080042
Nico Huber5ffc2c82022-08-05 12:58:18 +020043/*
44 * Search for an extended capability with the ID `cap`.
45 *
46 * Returns the offset of the first matching extended
47 * capability if found, or 0 otherwise.
48 *
49 * A new search is started with `offset == 0`.
50 * To continue a search, the prior return value
51 * should be passed as `offset`.
52 */
53unsigned int pciexp_find_extended_cap(const struct device *dev, unsigned int cap,
54 unsigned int offset)
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060055{
Nico Huber5ffc2c82022-08-05 12:58:18 +020056 unsigned int next_cap_offset;
57
58 if (offset)
Nico Huber077dc2e2022-08-05 14:47:35 +020059 next_cap_offset = ext_cap_next_offset(pci_read_config32(dev, offset));
Nico Huber5ffc2c82022-08-05 12:58:18 +020060 else
61 next_cap_offset = PCIE_EXT_CAP_OFFSET;
62
Nico Huber077dc2e2022-08-05 14:47:35 +020063 return find_ext_cap_offset(dev, cap, next_cap_offset);
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060064}
65
Nico Huber9099fea2022-08-05 13:02:52 +020066/*
67 * Search for a vendor-specific extended capability,
68 * with the vendor-specific ID `cap`.
69 *
70 * Returns the offset of the vendor-specific header,
71 * i.e. the offset of the extended capability + 4,
72 * or 0 if none is found.
73 *
74 * A new search is started with `offset == 0`.
75 * To continue a search, the prior return value
76 * should be passed as `offset`.
77 */
78unsigned int pciexp_find_ext_vendor_cap(const struct device *dev, unsigned int cap,
79 unsigned int offset)
80{
81 /* Reconstruct capability offset from vendor-specific header offset. */
82 if (offset >= 4)
83 offset -= 4;
84
85 for (;;) {
86 offset = pciexp_find_extended_cap(dev, PCI_EXT_CAP_ID_VNDR, offset);
87 if (!offset)
88 return 0;
89
90 const unsigned int vndr_cap = pci_read_config32(dev, offset + 4);
91 if ((vndr_cap & 0xffff) == cap)
92 return offset + 4;
93 }
94}
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060095
Duncan Laurie90dcdd42011-10-25 14:15:11 -070096/*
97 * Re-train a PCIe link
98 */
99#define PCIE_TRAIN_RETRY 10000
Martin Roth38ddbfb2019-10-23 21:41:00 -0600100static int pciexp_retrain_link(struct device *dev, unsigned int cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700101{
Youness Alaouibb5fb642017-05-03 17:57:13 -0400102 unsigned int try;
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700103 u16 lnk;
104
Youness Alaouibb5fb642017-05-03 17:57:13 -0400105 /*
106 * Implementation note (page 633) in PCIe Specification 3.0 suggests
107 * polling the Link Training bit in the Link Status register until the
108 * value returned is 0 before setting the Retrain Link bit to 1.
109 * This is meant to avoid a race condition when using the
110 * Retrain Link mechanism.
111 */
112 for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
113 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
114 if (!(lnk & PCI_EXP_LNKSTA_LT))
115 break;
116 udelay(100);
117 }
118 if (try == 0) {
119 printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
120 return -1;
121 }
122
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700123 /* Start link retraining */
124 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
125 lnk |= PCI_EXP_LNKCTL_RL;
126 pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);
127
128 /* Wait for training to complete */
Youness Alaouibb5fb642017-05-03 17:57:13 -0400129 for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700130 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
131 if (!(lnk & PCI_EXP_LNKSTA_LT))
132 return 0;
133 udelay(100);
134 }
135
136 printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
137 return -1;
138}
139
140/*
141 * Check the Slot Clock Configuration for root port and endpoint
142 * and enable Common Clock Configuration if possible. If CCC is
143 * enabled the link must be retrained.
144 */
Martin Roth38ddbfb2019-10-23 21:41:00 -0600145static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
146 struct device *endp, unsigned int endp_cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700147{
148 u16 root_scc, endp_scc, lnkctl;
149
150 /* Get Slot Clock Configuration for root port */
151 root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
152 root_scc &= PCI_EXP_LNKSTA_SLC;
153
154 /* Get Slot Clock Configuration for endpoint */
155 endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
156 endp_scc &= PCI_EXP_LNKSTA_SLC;
157
158 /* Enable Common Clock Configuration and retrain */
159 if (root_scc && endp_scc) {
160 printk(BIOS_INFO, "Enabling Common Clock Configuration\n");
161
162 /* Set in endpoint */
163 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
164 lnkctl |= PCI_EXP_LNKCTL_CCC;
165 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
166
167 /* Set in root port */
168 lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
169 lnkctl |= PCI_EXP_LNKCTL_CCC;
170 pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);
171
172 /* Retrain link if CCC was enabled */
173 pciexp_retrain_link(root, root_cap);
174 }
175}
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700176
Martin Roth38ddbfb2019-10-23 21:41:00 -0600177static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
Kane Chen18cb1342014-10-01 11:13:54 +0800178{
179 /* check if per port clk req is supported in device */
180 u32 endp_ca;
181 u16 lnkctl;
182 endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
183 if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
Arthur Heymans330c46b2017-07-12 19:17:56 +0200184 printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
Kane Chen18cb1342014-10-01 11:13:54 +0800185 return;
186 }
187 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
188 lnkctl = lnkctl | PCI_EXP_EN_CLK_PM;
189 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
190}
Kane Chen18cb1342014-10-01 11:13:54 +0800191
Nico Huber968ef752021-03-07 01:39:18 +0100192static bool _pciexp_ltr_supported(struct device *dev, unsigned int cap)
Kenji Chen31c6e632014-10-04 01:14:44 +0800193{
Nico Huber968ef752021-03-07 01:39:18 +0100194 return pci_read_config16(dev, cap + PCI_EXP_DEVCAP2) & PCI_EXP_DEVCAP2_LTR;
Kenji Chen31c6e632014-10-04 01:14:44 +0800195}
196
Nico Huber968ef752021-03-07 01:39:18 +0100197static bool _pciexp_ltr_enabled(struct device *dev, unsigned int cap)
Aamir Bohra2188f572017-09-22 19:07:21 +0530198{
Nico Huber968ef752021-03-07 01:39:18 +0100199 return pci_read_config16(dev, cap + PCI_EXP_DEVCTL2) & PCI_EXP_DEV2_LTR;
Aamir Bohra2188f572017-09-22 19:07:21 +0530200}
201
Nico Huber968ef752021-03-07 01:39:18 +0100202static bool _pciexp_enable_ltr(struct device *parent, unsigned int parent_cap,
203 struct device *dev, unsigned int cap)
Kenji Chen31c6e632014-10-04 01:14:44 +0800204{
Nico Huber968ef752021-03-07 01:39:18 +0100205 if (!_pciexp_ltr_supported(dev, cap)) {
206 printk(BIOS_DEBUG, "%s: No LTR support\n", dev_path(dev));
207 return false;
Pratik Prajapati0cd0d282015-06-09 12:06:20 -0700208 }
Aamir Bohra2188f572017-09-22 19:07:21 +0530209
Nico Huber968ef752021-03-07 01:39:18 +0100210 if (_pciexp_ltr_enabled(dev, cap))
211 return true;
Aamir Bohra2188f572017-09-22 19:07:21 +0530212
Nico Huber968ef752021-03-07 01:39:18 +0100213 if (parent &&
Nico Huber49fc4e32022-08-17 21:57:46 +0200214 (!_pciexp_ltr_supported(parent, parent_cap) ||
Nico Huber968ef752021-03-07 01:39:18 +0100215 !_pciexp_ltr_enabled(parent, parent_cap)))
216 return false;
Aamir Bohra2188f572017-09-22 19:07:21 +0530217
Nico Huber968ef752021-03-07 01:39:18 +0100218 pci_or_config16(dev, cap + PCI_EXP_DEVCTL2, PCI_EXP_DEV2_LTR);
219 printk(BIOS_INFO, "%s: Enabled LTR\n", dev_path(dev));
220 return true;
Aamir Bohra2188f572017-09-22 19:07:21 +0530221}
222
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200223static void pciexp_enable_ltr(struct device *dev)
Aamir Bohra2188f572017-09-22 19:07:21 +0530224{
Nico Huber968ef752021-03-07 01:39:18 +0100225 const unsigned int cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
226 if (!cap)
227 return;
Aamir Bohra2188f572017-09-22 19:07:21 +0530228
Nico Huber968ef752021-03-07 01:39:18 +0100229 /*
230 * If we have get_ltr_max_latencies(), treat `dev` as the root.
231 * If not, let _pciexp_enable_ltr() query the parent's state.
232 */
233 struct device *parent = NULL;
234 unsigned int parent_cap = 0;
235 if (!dev->ops->ops_pci || !dev->ops->ops_pci->get_ltr_max_latencies) {
236 parent = dev->bus->dev;
Nico Huber49fc4e32022-08-17 21:57:46 +0200237 if (parent->path.type != DEVICE_PATH_PCI)
238 return;
Bill XIEa43380e2022-08-03 00:18:14 +0800239 parent_cap = pci_find_capability(parent, PCI_CAP_ID_PCIE);
Nico Huber968ef752021-03-07 01:39:18 +0100240 if (!parent_cap)
241 return;
Aamir Bohra2188f572017-09-22 19:07:21 +0530242 }
Nico Huber968ef752021-03-07 01:39:18 +0100243
244 (void)_pciexp_enable_ltr(parent, parent_cap, dev, cap);
245}
246
Tim Wawrzynczaka62cb562021-12-08 21:16:43 -0700247bool pciexp_get_ltr_max_latencies(struct device *dev, u16 *max_snoop, u16 *max_nosnoop)
Nico Huber968ef752021-03-07 01:39:18 +0100248{
249 /* Walk the hierarchy up to find get_ltr_max_latencies(). */
250 do {
251 if (dev->ops->ops_pci && dev->ops->ops_pci->get_ltr_max_latencies)
252 break;
253 if (dev->bus->dev == dev || dev->bus->dev->path.type != DEVICE_PATH_PCI)
254 return false;
255 dev = dev->bus->dev;
256 } while (true);
257
258 dev->ops->ops_pci->get_ltr_max_latencies(max_snoop, max_nosnoop);
259 return true;
260}
261
262static void pciexp_configure_ltr(struct device *parent, unsigned int parent_cap,
263 struct device *dev, unsigned int cap)
264{
265 if (!_pciexp_enable_ltr(parent, parent_cap, dev, cap))
266 return;
267
Nico Huber5ffc2c82022-08-05 12:58:18 +0200268 const unsigned int ltr_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID, 0);
Nico Huber968ef752021-03-07 01:39:18 +0100269 if (!ltr_cap)
270 return;
271
272 u16 max_snoop, max_nosnoop;
273 if (!pciexp_get_ltr_max_latencies(dev, &max_snoop, &max_nosnoop))
274 return;
275
276 pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_SNOOP, max_snoop);
277 pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_NOSNOOP, max_nosnoop);
278 printk(BIOS_INFO, "%s: Programmed LTR max latencies\n", dev_path(dev));
Kenji Chen31c6e632014-10-04 01:14:44 +0800279}
280
/*
 * Merge the aggregated L1 sub-state parameters in *data with those of one
 * endpoint, keeping the worst-case (largest) timing values and the common
 * subset of supported sub-states.
 *
 * *data layout (mirrors the L1SS capability register at offset +4):
 *   [3:0]   supported L1 sub-states bitmap
 *   [15:8]  common-mode restore time
 *   [17:16] power-on scale
 *   [23:19] power-on value
 *
 * Returns 1 and updates *data when the endpoint shares at least one
 * supported sub-state with *data, 0 otherwise (callers then abandon
 * L1 sub-state enablement for the whole link).
 */
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
				unsigned int *data)
{
	/* Power-on scale multipliers; index 3 is reserved per spec, hence 0. */
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	/* Read the endpoint's L1SS capabilities (same bit layout as *data). */
	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	/* Only sub-states supported by both sides remain usable. */
	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	/* Keep the larger power-on time (value scaled by its multiplier). */
	if (power_on_value * mult[power_on_scale] <
		endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	/* Keep the larger common-mode restore time. */
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	/* Repack the merged values into *data (same layout as on entry). */
	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}
315
/*
 * Program L1 sub-state parameters into the root port and every sibling
 * endpoint function, using the worst-case values negotiated across all
 * of them. Offsets +0x08 and +0x0c below are the L1SS Control 1 and
 * Control 2 registers of the L1 PM Substates capability.
 */
static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
				unsigned int root_cap, unsigned int end_cap)
{
	struct device *dev_t;
	unsigned char L1_ss_ok;
	/* Start with the root port's own L1SS capabilities (offset +4). */
	unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
	unsigned int L1SubStateSupport;
	unsigned int comm_mode_rst_time;
	unsigned int power_on_scale;
	unsigned int endp_power_on_value;

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		/*
		 * rp_L1_support is init'd above from root port.
		 * it needs coordination with endpoints to reach in common.
		 * if certain endpoint doesn't support L1 Sub-State, abort
		 * this feature enabling.
		 */
		L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
						&rp_L1_support);
		if (!L1_ss_ok)
			return;
	}

	/* Unpack the negotiated worst-case parameters. */
	L1SubStateSupport = rp_L1_support & 0xf;
	comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
	power_on_scale = (rp_L1_support >> 16) & 0x3;
	endp_power_on_value = (rp_L1_support >> 19) & 0x1f;

	printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
		root->path.pci.devfn >> 3);
	printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
	printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
	printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
		endp_power_on_value, power_on_scale);

	/* Root port: common-mode restore time (Control 1, bits 15:8). */
	pci_update_config32(root, root_cap + 0x08, ~0xff00,
		(comm_mode_rst_time << 8));

	/* Root port: T_POWER_ON scale/value (Control 2). */
	pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
		(endp_power_on_value << 3) | (power_on_scale));

	/* TODO: 0xa0, 2 are values that work on some chipsets but really
	 * should be determined dynamically by looking at downstream devices.
	 */
	pci_update_config32(root, root_cap + 0x08,
		~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
			ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
		(0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
		(2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

	/* Root port: enable the common supported sub-states last. */
	pci_update_config32(root, root_cap + 0x08, ~0x1f,
		L1SubStateSupport);

	/* Mirror the same programming into every endpoint function. */
	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
			(endp_power_on_value << 3) | (power_on_scale));

		pci_update_config32(dev_t, end_cap + 0x08,
			~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
				ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
			(0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
			(2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

		pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
			L1SubStateSupport);
	}
}
384
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200385static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
Kenji Chen31c6e632014-10-04 01:14:44 +0800386{
387 unsigned int root_cap, end_cap;
388
389 /* Do it for function 0 only */
390 if (dev->path.pci.devfn & 0x7)
391 return;
392
Nico Huber5ffc2c82022-08-05 12:58:18 +0200393 root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID, 0);
Kenji Chen31c6e632014-10-04 01:14:44 +0800394 if (!root_cap)
395 return;
396
Nico Huber5ffc2c82022-08-05 12:58:18 +0200397 end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID, 0);
Kenji Chen31c6e632014-10-04 01:14:44 +0800398 if (!end_cap) {
Nico Huberbba97352022-08-05 13:09:25 +0200399 if (dev->vendor != PCI_VID_INTEL)
400 return;
401
402 end_cap = pciexp_find_ext_vendor_cap(dev, 0xcafe, 0);
Kenji Chen31c6e632014-10-04 01:14:44 +0800403 if (!end_cap)
404 return;
405 }
406
407 pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
408}
Kenji Chen31c6e632014-10-04 01:14:44 +0800409
/*
 * Determine the ASPM L0s or L1 exit latency for a link
 * by checking both root port and endpoint and returning
 * the highest latency value.
 *
 * Returns the encoded exit-latency field (not a time unit), or -1 if
 * either link partner does not support the requested ASPM type.
 */
static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap,
			       enum aspm_type type)
{
	int root_lat = 0, endp_lat = 0;
	u32 root_lnkcap, endp_lnkcap;

	root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
	endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);

	/* Make sure the link supports this ASPM type by checking
	 * capability bits 11:10 with aspm_type offset by 1 */
	if (!(root_lnkcap & (1 << (type + 9))) ||
	    !(endp_lnkcap & (1 << (type + 9))))
		return -1;

	/* Find the one with higher latency */
	switch (type) {
	case PCIE_ASPM_L0S:
		/* L0s exit latency field, shifted down from LNKCAP bit 12. */
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		break;
	case PCIE_ASPM_L1:
		/* L1 exit latency field, shifted down from LNKCAP bit 15. */
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		break;
	default:
		return -1;
	}

	return (endp_lat > root_lat) ? endp_lat : root_lat;
}
447
/*
 * Enable ASPM on PCIe root port and endpoint.
 *
 * Each ASPM state (L0s, L1) is enabled only when the link's exit latency
 * fits within the endpoint's acceptable limit from its Device Capabilities.
 */
static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap)
{
	/* Indexed by the PCIE_ASPM_* bitmask value (0..3). */
	const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
	enum aspm_type apmc = PCIE_ASPM_NONE;
	int exit_latency, ok_latency;
	u16 lnkctl;
	u32 devcap;

	/* Honor a per-device ASPM opt-out. */
	if (endp->disable_pcie_aspm)
		return;

	/* Get endpoint device capabilities for acceptable limits */
	devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);

	/* Enable L0s if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L0S);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L0S;

	/* Enable L1 if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L1);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L1;

	if (apmc != PCIE_ASPM_NONE) {
		/* Set APMC in root port first */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set APMC in endpoint device next */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
	}

	printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
}
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700494
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +0200495/*
496 * Set max payload size of endpoint in accordance with max payload size of root port.
497 */
498static void pciexp_set_max_payload_size(struct device *root, unsigned int root_cap,
499 struct device *endp, unsigned int endp_cap)
500{
501 unsigned int endp_max_payload, root_max_payload, max_payload;
502 u16 endp_devctl, root_devctl;
503 u32 endp_devcap, root_devcap;
504
505 /* Get max payload size supported by endpoint */
506 endp_devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
507 endp_max_payload = endp_devcap & PCI_EXP_DEVCAP_PAYLOAD;
508
509 /* Get max payload size supported by root port */
510 root_devcap = pci_read_config32(root, root_cap + PCI_EXP_DEVCAP);
511 root_max_payload = root_devcap & PCI_EXP_DEVCAP_PAYLOAD;
512
513 /* Set max payload to smaller of the reported device capability. */
514 max_payload = MIN(endp_max_payload, root_max_payload);
515 if (max_payload > 5) {
516 /* Values 6 and 7 are reserved in PCIe 3.0 specs. */
517 printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
518 max_payload);
519 max_payload = 5;
520 }
521
522 endp_devctl = pci_read_config16(endp, endp_cap + PCI_EXP_DEVCTL);
523 endp_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
524 endp_devctl |= max_payload << 5;
525 pci_write_config16(endp, endp_cap + PCI_EXP_DEVCTL, endp_devctl);
526
527 root_devctl = pci_read_config16(root, root_cap + PCI_EXP_DEVCTL);
528 root_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
529 root_devctl |= max_payload << 5;
530 pci_write_config16(root, root_cap + PCI_EXP_DEVCTL, root_devctl);
531
532 printk(BIOS_INFO, "PCIe: Max_Payload_Size adjusted to %d\n", (1 << (max_payload + 7)));
533}
534
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200535static void pciexp_tune_dev(struct device *dev)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000536{
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200537 struct device *root = dev->bus->dev;
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700538 unsigned int root_cap, cap;
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000539
540 cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
Uwe Hermannd453dd02010-10-18 00:00:57 +0000541 if (!cap)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000542 return;
Uwe Hermannd453dd02010-10-18 00:00:57 +0000543
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700544 root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
545 if (!root_cap)
546 return;
Stefan Reinauerf6eb88a2010-01-17 13:54:08 +0000547
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700548 /* Check for and enable Common Clock */
Julius Wernercd49cce2019-03-05 16:53:33 -0800549 if (CONFIG(PCIEXP_COMMON_CLOCK))
Kyösti Mälkki91bfa8e2016-11-20 20:39:56 +0200550 pciexp_enable_common_clock(root, root_cap, dev, cap);
Uwe Hermanne4870472010-11-04 23:23:47 +0000551
Kane Chen18cb1342014-10-01 11:13:54 +0800552 /* Check if per port CLK req is supported by endpoint*/
Julius Wernercd49cce2019-03-05 16:53:33 -0800553 if (CONFIG(PCIEXP_CLK_PM))
Kyösti Mälkki91bfa8e2016-11-20 20:39:56 +0200554 pciexp_enable_clock_power_pm(dev, cap);
Kane Chen18cb1342014-10-01 11:13:54 +0800555
Kenji Chen31c6e632014-10-04 01:14:44 +0800556 /* Enable L1 Sub-State when both root port and endpoint support */
Julius Wernercd49cce2019-03-05 16:53:33 -0800557 if (CONFIG(PCIEXP_L1_SUB_STATE))
Kyösti Mälkki91bfa8e2016-11-20 20:39:56 +0200558 pciexp_config_L1_sub_state(root, dev);
Kenji Chen31c6e632014-10-04 01:14:44 +0800559
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700560 /* Check for and enable ASPM */
Julius Wernercd49cce2019-03-05 16:53:33 -0800561 if (CONFIG(PCIEXP_ASPM))
Kyösti Mälkki91bfa8e2016-11-20 20:39:56 +0200562 pciexp_enable_aspm(root, root_cap, dev, cap);
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +0200563
564 /* Adjust Max_Payload_Size of link ends. */
565 pciexp_set_max_payload_size(root, root_cap, dev, cap);
Nico Huber968ef752021-03-07 01:39:18 +0100566
567 pciexp_configure_ltr(root, root_cap, dev, cap);
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000568}
569
Kyösti Mälkkide271a82015-03-18 13:09:47 +0200570void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
571 unsigned int max_devfn)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000572{
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200573 struct device *child;
Nico Huber968ef752021-03-07 01:39:18 +0100574
575 pciexp_enable_ltr(bus->dev);
576
Kyösti Mälkkide271a82015-03-18 13:09:47 +0200577 pci_scan_bus(bus, min_devfn, max_devfn);
Uwe Hermannd453dd02010-10-18 00:00:57 +0000578
579 for (child = bus->children; child; child = child->sibling) {
Duncan Lauriebf696222020-10-18 15:10:00 -0700580 if (child->path.type != DEVICE_PATH_PCI)
581 continue;
Uwe Hermannd453dd02010-10-18 00:00:57 +0000582 if ((child->path.pci.devfn < min_devfn) ||
583 (child->path.pci.devfn > max_devfn)) {
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000584 continue;
585 }
586 pciexp_tune_dev(child);
587 }
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000588}
589
/* Scan the bus below a PCIe bridge using the PCIe-aware bus scanner. */
void pciexp_scan_bridge(struct device *dev)
{
	do_pci_scan_bridge(dev, pciexp_scan_bus);
}
594
/** Default device operations for PCI Express bridges */
static struct pci_operations pciexp_bus_ops_pci = {
	/* Bridges do not need subsystem ID programming. */
	.set_subsystem = 0,
};

struct device_operations default_pciexp_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600608
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600609static void pciexp_hotplug_dummy_read_resources(struct device *dev)
610{
611 struct resource *resource;
612
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700613 /* Add extra memory space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600614 resource = new_resource(dev, 0x10);
615 resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
616 resource->align = 12;
617 resource->gran = 12;
618 resource->limit = 0xffffffff;
619 resource->flags |= IORESOURCE_MEM;
620
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700621 /* Add extra prefetchable memory space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600622 resource = new_resource(dev, 0x14);
623 resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
624 resource->align = 12;
625 resource->gran = 12;
626 resource->limit = 0xffffffffffffffff;
627 resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
628
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700629 /* Set resource flag requesting allocation above 4G boundary. */
630 if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
631 resource->flags |= IORESOURCE_ABOVE_4G;
632
633 /* Add extra I/O space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600634 resource = new_resource(dev, 0x18);
635 resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
636 resource->align = 12;
637 resource->gran = 12;
638 resource->limit = 0xffff;
639 resource->flags |= IORESOURCE_IO;
640}
641
/* Minimal ops for the resource-reserving dummy device below. */
static struct device_operations pciexp_hotplug_dummy_ops = {
	.read_resources = pciexp_hotplug_dummy_read_resources,
	.set_resources = noop_set_resources,
};
646
/*
 * Scan a hotplug-capable PCIe bridge: reserve spare bus numbers,
 * perform the normal scan, then attach a dummy device that holds
 * window space for devices plugged in after boot.
 */
void pciexp_hotplug_scan_bridge(struct device *dev)
{
	/* Reserve extra bus numbers for future hotplugged devices. */
	dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;

	/* Normal PCIe Scan */
	pciexp_scan_bridge(dev);

	/* Add dummy slot to preserve resources, must happen after bus scan */
	struct device *dummy;
	struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
	dummy = alloc_dev(dev->link_list, &dummy_path);
	dummy->ops = &pciexp_hotplug_dummy_ops;
}
660
/* Default device operations for hotplug-capable PCI Express bridges. */
struct device_operations default_pciexp_hotplug_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_hotplug_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};