blob: ac4e668f08a3a56278566ae07d7fafac55b4e109 [file] [log] [blame]
Angel Ponsc74dae92020-04-02 23:48:16 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00002
3#include <console/console.h>
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +02004#include <commonlib/helpers.h>
Duncan Laurie90dcdd42011-10-25 14:15:11 -07005#include <delay.h>
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00006#include <device/device.h>
7#include <device/pci.h>
Patrick Rudolphe56189c2018-04-18 10:11:59 +02008#include <device/pci_ops.h>
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00009#include <device/pciexp.h>
10
/*
 * Search the PCIe extended capability list for capability ID `cap`,
 * starting the walk at config-space offset `offset`.
 *
 * Each extended capability header is a 32-bit word: bits 15:0 hold the
 * capability ID and bits 31:20 hold the offset of the next capability
 * (0 terminates the list).
 *
 * Besides the header at the current offset, this also checks the dword at
 * offset + 4 for a matching ID ("cafe") — used e.g. by
 * pciexp_config_L1_sub_state(), which falls back to searching for ID 0xcafe
 * when the standard L1SS ID is absent; presumably some devices expose the
 * capability one dword in (NOTE(review): quirk inherited from original code,
 * confirm against affected hardware).
 *
 * Returns the config-space offset of the matching capability, or 0 if not
 * found.
 */
static unsigned int pciexp_get_ext_cap_offset(const struct device *dev, unsigned int cap,
					      unsigned int offset)
{
	unsigned int this_cap_offset = offset;
	unsigned int next_cap_offset, this_cap, cafe;
	do {
		this_cap = pci_read_config32(dev, this_cap_offset);
		/* Bail out when this request is unsupported */
		if (this_cap == 0xffffffff)
			break;
		cafe = pci_read_config32(dev, this_cap_offset + 4);
		if ((this_cap & 0xffff) == cap) {
			return this_cap_offset;
		} else if ((cafe & 0xffff) == cap) {
			return this_cap_offset + 4;
		} else {
			/* Next-pointer lives in header bits 31:20 */
			next_cap_offset = this_cap >> 20;
			this_cap_offset = next_cap_offset;
		}
	} while (next_cap_offset != 0);

	return 0;
}
Kenji Chen31c6e632014-10-04 01:14:44 +080034
/*
 * Find the next occurrence of extended capability `cap` after config-space
 * offset `pos` (which must point at an extended capability header).
 * Returns the capability offset, or 0 if there is no further match.
 */
unsigned int pciexp_find_next_extended_cap(const struct device *dev, unsigned int cap,
					   unsigned int pos)
{
	/* Follow the next-capability pointer (header bits 31:20) and resume. */
	const unsigned int next_cap_offset = pci_read_config32(dev, pos) >> 20;
	return pciexp_get_ext_cap_offset(dev, cap, next_cap_offset);
}
41
/*
 * Find the first occurrence of extended capability `cap`, starting at the
 * beginning of the extended capability list (PCIE_EXT_CAP_OFFSET).
 * Returns the capability offset, or 0 if not found.
 */
unsigned int pciexp_find_extended_cap(const struct device *dev, unsigned int cap)
{
	return pciexp_get_ext_cap_offset(dev, cap, PCIE_EXT_CAP_OFFSET);
}
46
Duncan Laurie90dcdd42011-10-25 14:15:11 -070047/*
48 * Re-train a PCIe link
49 */
50#define PCIE_TRAIN_RETRY 10000
Martin Roth38ddbfb2019-10-23 21:41:00 -060051static int pciexp_retrain_link(struct device *dev, unsigned int cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -070052{
Youness Alaouibb5fb642017-05-03 17:57:13 -040053 unsigned int try;
Duncan Laurie90dcdd42011-10-25 14:15:11 -070054 u16 lnk;
55
Youness Alaouibb5fb642017-05-03 17:57:13 -040056 /*
57 * Implementation note (page 633) in PCIe Specification 3.0 suggests
58 * polling the Link Training bit in the Link Status register until the
59 * value returned is 0 before setting the Retrain Link bit to 1.
60 * This is meant to avoid a race condition when using the
61 * Retrain Link mechanism.
62 */
63 for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
64 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
65 if (!(lnk & PCI_EXP_LNKSTA_LT))
66 break;
67 udelay(100);
68 }
69 if (try == 0) {
70 printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
71 return -1;
72 }
73
Duncan Laurie90dcdd42011-10-25 14:15:11 -070074 /* Start link retraining */
75 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
76 lnk |= PCI_EXP_LNKCTL_RL;
77 pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);
78
79 /* Wait for training to complete */
Youness Alaouibb5fb642017-05-03 17:57:13 -040080 for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
Duncan Laurie90dcdd42011-10-25 14:15:11 -070081 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
82 if (!(lnk & PCI_EXP_LNKSTA_LT))
83 return 0;
84 udelay(100);
85 }
86
87 printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
88 return -1;
89}
90
91/*
92 * Check the Slot Clock Configuration for root port and endpoint
93 * and enable Common Clock Configuration if possible. If CCC is
94 * enabled the link must be retrained.
95 */
Martin Roth38ddbfb2019-10-23 21:41:00 -060096static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
97 struct device *endp, unsigned int endp_cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -070098{
99 u16 root_scc, endp_scc, lnkctl;
100
101 /* Get Slot Clock Configuration for root port */
102 root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
103 root_scc &= PCI_EXP_LNKSTA_SLC;
104
105 /* Get Slot Clock Configuration for endpoint */
106 endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
107 endp_scc &= PCI_EXP_LNKSTA_SLC;
108
109 /* Enable Common Clock Configuration and retrain */
110 if (root_scc && endp_scc) {
111 printk(BIOS_INFO, "Enabling Common Clock Configuration\n");
112
113 /* Set in endpoint */
114 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
115 lnkctl |= PCI_EXP_LNKCTL_CCC;
116 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
117
118 /* Set in root port */
119 lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
120 lnkctl |= PCI_EXP_LNKCTL_CCC;
121 pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);
122
123 /* Retrain link if CCC was enabled */
124 pciexp_retrain_link(root, root_cap);
125 }
126}
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700127
Martin Roth38ddbfb2019-10-23 21:41:00 -0600128static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
Kane Chen18cb1342014-10-01 11:13:54 +0800129{
130 /* check if per port clk req is supported in device */
131 u32 endp_ca;
132 u16 lnkctl;
133 endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
134 if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
Arthur Heymans330c46b2017-07-12 19:17:56 +0200135 printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
Kane Chen18cb1342014-10-01 11:13:54 +0800136 return;
137 }
138 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
139 lnkctl = lnkctl | PCI_EXP_EN_CLK_PM;
140 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
141}
Kane Chen18cb1342014-10-01 11:13:54 +0800142
/* Return true if `dev` advertises LTR support in Device Capabilities 2. */
static bool _pciexp_ltr_supported(struct device *dev, unsigned int cap)
{
	return pci_read_config16(dev, cap + PCI_EXP_DEVCAP2) & PCI_EXP_DEVCAP2_LTR;
}
147
/* Return true if the LTR mechanism is enabled in Device Control 2 of `dev`. */
static bool _pciexp_ltr_enabled(struct device *dev, unsigned int cap)
{
	return pci_read_config16(dev, cap + PCI_EXP_DEVCTL2) & PCI_EXP_DEV2_LTR;
}
152
Nico Huber968ef752021-03-07 01:39:18 +0100153static bool _pciexp_enable_ltr(struct device *parent, unsigned int parent_cap,
154 struct device *dev, unsigned int cap)
Kenji Chen31c6e632014-10-04 01:14:44 +0800155{
Nico Huber968ef752021-03-07 01:39:18 +0100156 if (!_pciexp_ltr_supported(dev, cap)) {
157 printk(BIOS_DEBUG, "%s: No LTR support\n", dev_path(dev));
158 return false;
Pratik Prajapati0cd0d282015-06-09 12:06:20 -0700159 }
Aamir Bohra2188f572017-09-22 19:07:21 +0530160
Nico Huber968ef752021-03-07 01:39:18 +0100161 if (_pciexp_ltr_enabled(dev, cap))
162 return true;
Aamir Bohra2188f572017-09-22 19:07:21 +0530163
Nico Huber968ef752021-03-07 01:39:18 +0100164 if (parent &&
165 (parent->path.type != DEVICE_PATH_PCI ||
166 !_pciexp_ltr_supported(parent, parent_cap) ||
167 !_pciexp_ltr_enabled(parent, parent_cap)))
168 return false;
Aamir Bohra2188f572017-09-22 19:07:21 +0530169
Nico Huber968ef752021-03-07 01:39:18 +0100170 pci_or_config16(dev, cap + PCI_EXP_DEVCTL2, PCI_EXP_DEV2_LTR);
171 printk(BIOS_INFO, "%s: Enabled LTR\n", dev_path(dev));
172 return true;
Aamir Bohra2188f572017-09-22 19:07:21 +0530173}
174
/*
 * Enable LTR on `dev` during bridge scan.
 *
 * When `dev` provides its own get_ltr_max_latencies() hook it is treated
 * as the root of the LTR hierarchy (no parent checks); otherwise the
 * parent's PCIe capability is located so _pciexp_enable_ltr() can verify
 * the parent already has LTR enabled.
 */
static void pciexp_enable_ltr(struct device *dev)
{
	const unsigned int cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	/*
	 * If we have get_ltr_max_latencies(), treat `dev` as the root.
	 * If not, let _pciexp_enable_ltr() query the parent's state.
	 */
	struct device *parent = NULL;
	unsigned int parent_cap = 0;
	if (!dev->ops->ops_pci || !dev->ops->ops_pci->get_ltr_max_latencies) {
		parent = dev->bus->dev;
		parent_cap = pci_find_capability(parent, PCI_CAP_ID_PCIE);
		if (!parent_cap)
			return;
	}

	/* Result intentionally ignored; failure just leaves LTR disabled. */
	(void)_pciexp_enable_ltr(parent, parent_cap, dev, cap);
}
196
Tim Wawrzynczaka62cb562021-12-08 21:16:43 -0700197bool pciexp_get_ltr_max_latencies(struct device *dev, u16 *max_snoop, u16 *max_nosnoop)
Nico Huber968ef752021-03-07 01:39:18 +0100198{
199 /* Walk the hierarchy up to find get_ltr_max_latencies(). */
200 do {
201 if (dev->ops->ops_pci && dev->ops->ops_pci->get_ltr_max_latencies)
202 break;
203 if (dev->bus->dev == dev || dev->bus->dev->path.type != DEVICE_PATH_PCI)
204 return false;
205 dev = dev->bus->dev;
206 } while (true);
207
208 dev->ops->ops_pci->get_ltr_max_latencies(max_snoop, max_nosnoop);
209 return true;
210}
211
212static void pciexp_configure_ltr(struct device *parent, unsigned int parent_cap,
213 struct device *dev, unsigned int cap)
214{
215 if (!_pciexp_enable_ltr(parent, parent_cap, dev, cap))
216 return;
217
218 const unsigned int ltr_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID);
219 if (!ltr_cap)
220 return;
221
222 u16 max_snoop, max_nosnoop;
223 if (!pciexp_get_ltr_max_latencies(dev, &max_snoop, &max_nosnoop))
224 return;
225
226 pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_SNOOP, max_snoop);
227 pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_NOSNOOP, max_nosnoop);
228 printk(BIOS_INFO, "%s: Programmed LTR max latencies\n", dev_path(dev));
Kenji Chen31c6e632014-10-04 01:14:44 +0800229}
230
/*
 * Negotiate L1 Sub-State parameters between the accumulated root-port view
 * in *data and endpoint `dev`.
 *
 * *data is a packed word mirroring the L1SS capability layout:
 *   bits  3:0  - supported L1 sub-states
 *   bits 15:8  - common mode restore time
 *   bits 17:16 - power-on scale
 *   bits 23:19 - power-on value
 *
 * The endpoint's values are read from its L1SS capability (offset + 4) and
 * merged in: supported states are ANDed, and the larger power-on time
 * (value * scale multiplier) and restore time win. *data is rewritten with
 * the merged result.
 *
 * Returns 1 on success, 0 when no common L1 sub-state support remains
 * (in which case *data is left unchanged).
 */
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
				unsigned int *data)
{
	/* Power-on scale encodings map to 2us, 10us, 100us; 3 is reserved. */
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	/* Only sub-states supported by both ends can be enabled. */
	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	/* Keep the larger power-on time (value scaled by its multiplier). */
	if (power_on_value * mult[power_on_scale] <
		endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	/* Keep the larger common mode restore time. */
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}
265
/*
 * Negotiate and program L1 Sub-States for root port `root` and all sibling
 * endpoint functions starting at `dev`.
 *
 * First the root port's L1SS capability (at root_cap + 4) is merged with
 * every endpoint's via pciexp_L1_substate_cal(); if any endpoint lacks
 * common support, the feature is left disabled entirely. Otherwise the
 * agreed restore time, power-on value/scale and sub-state enables are
 * written to the root port and then to each endpoint.
 *
 * Register offsets used below are relative to the L1SS capability:
 * +0x08 is L1SS Control 1 (enables, restore time, LTR L1.2 thresholds),
 * +0x0c is L1SS Control 2 (power-on value/scale).
 */
static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
				unsigned int root_cap, unsigned int end_cap)
{
	struct device *dev_t;
	unsigned char L1_ss_ok;
	/* Seed the negotiation with the root port's own capability word. */
	unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
	unsigned int L1SubStateSupport;
	unsigned int comm_mode_rst_time;
	unsigned int power_on_scale;
	unsigned int endp_power_on_value;

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		/*
		 * rp_L1_support is init'd above from root port.
		 * it needs coordination with endpoints to reach in common.
		 * if certain endpoint doesn't support L1 Sub-State, abort
		 * this feature enabling.
		 */
		L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
						&rp_L1_support);
		if (!L1_ss_ok)
			return;
	}

	/* Unpack the negotiated result (layout: see pciexp_L1_substate_cal). */
	L1SubStateSupport = rp_L1_support & 0xf;
	comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
	power_on_scale = (rp_L1_support >> 16) & 0x3;
	endp_power_on_value = (rp_L1_support >> 19) & 0x1f;

	printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
		root->path.pci.devfn >> 3);
	printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
	printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
	printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
		endp_power_on_value, power_on_scale);

	/* Root port: common mode restore time (Control 1 bits 15:8). */
	pci_update_config32(root, root_cap + 0x08, ~0xff00,
		(comm_mode_rst_time << 8));

	/* Root port: power-on value/scale (Control 2). */
	pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
		(endp_power_on_value << 3) | (power_on_scale));

	/* TODO: 0xa0, 2 are values that work on some chipsets but really
	 * should be determined dynamically by looking at downstream devices.
	 */
	pci_update_config32(root, root_cap + 0x08,
		~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
			ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
		(0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
		(2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

	/* Root port: enable the negotiated sub-states last. */
	pci_update_config32(root, root_cap + 0x08, ~0x1f,
		L1SubStateSupport);

	/* Program every endpoint function with the same settings. */
	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
			(endp_power_on_value << 3) | (power_on_scale));

		pci_update_config32(dev_t, end_cap + 0x08,
			~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
				ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
			(0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
			(2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

		pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
			L1SubStateSupport);
	}
}
334
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200335static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
Kenji Chen31c6e632014-10-04 01:14:44 +0800336{
337 unsigned int root_cap, end_cap;
338
339 /* Do it for function 0 only */
340 if (dev->path.pci.devfn & 0x7)
341 return;
342
343 root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID);
344 if (!root_cap)
345 return;
346
347 end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID);
348 if (!end_cap) {
349 end_cap = pciexp_find_extended_cap(dev, 0xcafe);
350 if (!end_cap)
351 return;
352 }
353
354 pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
355}
Kenji Chen31c6e632014-10-04 01:14:44 +0800356
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700357/*
358 * Determine the ASPM L0s or L1 exit latency for a link
359 * by checking both root port and endpoint and returning
360 * the highest latency value.
361 */
Martin Roth38ddbfb2019-10-23 21:41:00 -0600362static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
363 struct device *endp, unsigned int endp_cap,
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700364 enum aspm_type type)
365{
366 int root_lat = 0, endp_lat = 0;
367 u32 root_lnkcap, endp_lnkcap;
368
369 root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
370 endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
371
372 /* Make sure the link supports this ASPM type by checking
373 * capability bits 11:10 with aspm_type offset by 1 */
374 if (!(root_lnkcap & (1 << (type + 9))) ||
375 !(endp_lnkcap & (1 << (type + 9))))
376 return -1;
377
378 /* Find the one with higher latency */
379 switch (type) {
380 case PCIE_ASPM_L0S:
381 root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
382 endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
383 break;
384 case PCIE_ASPM_L1:
385 root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
386 endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
387 break;
388 default:
389 return -1;
390 }
391
392 return (endp_lat > root_lat) ? endp_lat : root_lat;
393}
394
395/*
396 * Enable ASPM on PCIe root port and endpoint.
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700397 */
Martin Roth38ddbfb2019-10-23 21:41:00 -0600398static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
399 struct device *endp, unsigned int endp_cap)
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700400{
401 const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
402 enum aspm_type apmc = PCIE_ASPM_NONE;
403 int exit_latency, ok_latency;
404 u16 lnkctl;
405 u32 devcap;
406
Nico Huber570b1832017-08-30 13:38:50 +0200407 if (endp->disable_pcie_aspm)
408 return;
409
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700410 /* Get endpoint device capabilities for acceptable limits */
411 devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
412
413 /* Enable L0s if it is within endpoint acceptable limit */
414 ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
415 exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
416 PCIE_ASPM_L0S);
417 if (exit_latency >= 0 && exit_latency <= ok_latency)
418 apmc |= PCIE_ASPM_L0S;
419
420 /* Enable L1 if it is within endpoint acceptable limit */
421 ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
422 exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
423 PCIE_ASPM_L1);
424 if (exit_latency >= 0 && exit_latency <= ok_latency)
425 apmc |= PCIE_ASPM_L1;
426
427 if (apmc != PCIE_ASPM_NONE) {
428 /* Set APMC in root port first */
429 lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
430 lnkctl |= apmc;
431 pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);
432
433 /* Set APMC in endpoint device next */
434 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
435 lnkctl |= apmc;
436 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
437 }
438
439 printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700440}
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700441
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +0200442/*
443 * Set max payload size of endpoint in accordance with max payload size of root port.
444 */
445static void pciexp_set_max_payload_size(struct device *root, unsigned int root_cap,
446 struct device *endp, unsigned int endp_cap)
447{
448 unsigned int endp_max_payload, root_max_payload, max_payload;
449 u16 endp_devctl, root_devctl;
450 u32 endp_devcap, root_devcap;
451
452 /* Get max payload size supported by endpoint */
453 endp_devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
454 endp_max_payload = endp_devcap & PCI_EXP_DEVCAP_PAYLOAD;
455
456 /* Get max payload size supported by root port */
457 root_devcap = pci_read_config32(root, root_cap + PCI_EXP_DEVCAP);
458 root_max_payload = root_devcap & PCI_EXP_DEVCAP_PAYLOAD;
459
460 /* Set max payload to smaller of the reported device capability. */
461 max_payload = MIN(endp_max_payload, root_max_payload);
462 if (max_payload > 5) {
463 /* Values 6 and 7 are reserved in PCIe 3.0 specs. */
464 printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
465 max_payload);
466 max_payload = 5;
467 }
468
469 endp_devctl = pci_read_config16(endp, endp_cap + PCI_EXP_DEVCTL);
470 endp_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
471 endp_devctl |= max_payload << 5;
472 pci_write_config16(endp, endp_cap + PCI_EXP_DEVCTL, endp_devctl);
473
474 root_devctl = pci_read_config16(root, root_cap + PCI_EXP_DEVCTL);
475 root_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
476 root_devctl |= max_payload << 5;
477 pci_write_config16(root, root_cap + PCI_EXP_DEVCTL, root_devctl);
478
479 printk(BIOS_INFO, "PCIe: Max_Payload_Size adjusted to %d\n", (1 << (max_payload + 7)));
480}
481
/*
 * Apply all configured PCIe link tunings to endpoint `dev` and the root
 * port above it: common clock, clock power management, L1 sub-states,
 * ASPM, Max_Payload_Size and LTR. Each feature (except payload size and
 * LTR) is gated by its Kconfig option.
 */
static void pciexp_tune_dev(struct device *dev)
{
	struct device *root = dev->bus->dev;
	unsigned int root_cap, cap;

	/* Both link ends must expose a PCIe capability. */
	cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
	if (!root_cap)
		return;

	/* Check for and enable Common Clock */
	if (CONFIG(PCIEXP_COMMON_CLOCK))
		pciexp_enable_common_clock(root, root_cap, dev, cap);

	/* Check if per port CLK req is supported by endpoint*/
	if (CONFIG(PCIEXP_CLK_PM))
		pciexp_enable_clock_power_pm(dev, cap);

	/* Enable L1 Sub-State when both root port and endpoint support */
	if (CONFIG(PCIEXP_L1_SUB_STATE))
		pciexp_config_L1_sub_state(root, dev);

	/* Check for and enable ASPM */
	if (CONFIG(PCIEXP_ASPM))
		pciexp_enable_aspm(root, root_cap, dev, cap);

	/* Adjust Max_Payload_Size of link ends. */
	pciexp_set_max_payload_size(root, root_cap, dev, cap);

	pciexp_configure_ltr(root, root_cap, dev, cap);
}
516
Kyösti Mälkkide271a82015-03-18 13:09:47 +0200517void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
518 unsigned int max_devfn)
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000519{
Elyes HAOUASb1fa2872018-05-02 21:11:38 +0200520 struct device *child;
Nico Huber968ef752021-03-07 01:39:18 +0100521
522 pciexp_enable_ltr(bus->dev);
523
Kyösti Mälkkide271a82015-03-18 13:09:47 +0200524 pci_scan_bus(bus, min_devfn, max_devfn);
Uwe Hermannd453dd02010-10-18 00:00:57 +0000525
526 for (child = bus->children; child; child = child->sibling) {
Duncan Lauriebf696222020-10-18 15:10:00 -0700527 if (child->path.type != DEVICE_PATH_PCI)
528 continue;
Uwe Hermannd453dd02010-10-18 00:00:57 +0000529 if ((child->path.pci.devfn < min_devfn) ||
530 (child->path.pci.devfn > max_devfn)) {
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000531 continue;
532 }
533 pciexp_tune_dev(child);
534 }
Yinghai Lu13f1c2a2005-07-08 02:49:49 +0000535}
536
/* Scan a PCIe bridge using pciexp_scan_bus() for its secondary bus. */
void pciexp_scan_bridge(struct device *dev)
{
	do_pci_scan_bridge(dev, pciexp_scan_bus);
}
541
/** Default device operations for PCI Express bridges */
static struct pci_operations pciexp_bus_ops_pci = {
	/* No subsystem IDs are programmed on bridges. */
	.set_subsystem = 0,
};
546
/* Standard operations table for a (non-hotplug) PCIe bridge device. */
struct device_operations default_pciexp_ops_bus = {
	.read_resources   = pci_bus_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus         = pciexp_scan_bridge,
	.reset_bus        = pci_bus_reset,
	.ops_pci          = &pciexp_bus_ops_pci,
};
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600555
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600556static void pciexp_hotplug_dummy_read_resources(struct device *dev)
557{
558 struct resource *resource;
559
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700560 /* Add extra memory space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600561 resource = new_resource(dev, 0x10);
562 resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
563 resource->align = 12;
564 resource->gran = 12;
565 resource->limit = 0xffffffff;
566 resource->flags |= IORESOURCE_MEM;
567
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700568 /* Add extra prefetchable memory space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600569 resource = new_resource(dev, 0x14);
570 resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
571 resource->align = 12;
572 resource->gran = 12;
573 resource->limit = 0xffffffffffffffff;
574 resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
575
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700576 /* Set resource flag requesting allocation above 4G boundary. */
577 if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
578 resource->flags |= IORESOURCE_ABOVE_4G;
579
580 /* Add extra I/O space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600581 resource = new_resource(dev, 0x18);
582 resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
583 resource->align = 12;
584 resource->gran = 12;
585 resource->limit = 0xffff;
586 resource->flags |= IORESOURCE_IO;
587}
588
/* Operations for the placeholder device that pins the hotplug windows. */
static struct device_operations pciexp_hotplug_dummy_ops = {
	.read_resources = pciexp_hotplug_dummy_read_resources,
	.set_resources  = noop_set_resources,
};
593
/*
 * Scan a hotplug-capable PCIe bridge: reserve extra downstream bus
 * numbers, run the normal PCIe scan, then attach a dummy device whose
 * resources keep the hotplug memory/IO windows allocated.
 */
void pciexp_hotplug_scan_bridge(struct device *dev)
{
	/* Reserve bus numbers for devices plugged in after boot. */
	dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;

	/* Normal PCIe Scan */
	pciexp_scan_bridge(dev);

	/* Add dummy slot to preserve resources, must happen after bus scan */
	struct device *dummy;
	struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
	dummy = alloc_dev(dev->link_list, &dummy_path);
	dummy->ops = &pciexp_hotplug_dummy_ops;
}
607
/* Standard operations table for a hotplug-capable PCIe bridge device. */
struct device_operations default_pciexp_hotplug_ops_bus = {
	.read_resources   = pci_bus_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus         = pciexp_hotplug_scan_bridge,
	.reset_bus        = pci_bus_reset,
	.ops_pci          = &pciexp_bus_ops_pci,
};