/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <commonlib/helpers.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ops.h>
#include <device/pciexp.h>

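/*
 * Walk the PCIe extended capability list, which starts at PCIE_EXT_CAP_OFFSET,
 * and return the config space offset of the capability matching the given ID.
 * The ID is also compared against the low 16 bits of the DWORD following each
 * capability header, so capabilities identified by a secondary ID (such as the
 * 0xcafe L1 Sub-State lookup below) can be located too. Returns 0 if no match
 * is found.
 */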
unsigned int pciexp_find_extended_cap(struct device *dev, unsigned int cap)
{
	unsigned int this_cap_offset, next_cap_offset;
	unsigned int this_cap, cafe;

	this_cap_offset = PCIE_EXT_CAP_OFFSET;
	do {
		this_cap = pci_read_config32(dev, this_cap_offset);
		next_cap_offset = this_cap >> 20;
		this_cap &= 0xffff;
		cafe = pci_read_config32(dev, this_cap_offset + 4);
		cafe &= 0xffff;
		if (this_cap == cap)
			return this_cap_offset;
		else if (cafe == cap)
			return this_cap_offset + 4;
		else
			this_cap_offset = next_cap_offset;
	} while (next_cap_offset != 0);

	return 0;
}

/*
 * Re-train a PCIe link
 */
#define PCIE_TRAIN_RETRY 10000
static int pciexp_retrain_link(struct device *dev, unsigned int cap)
{
	unsigned int try;
	u16 lnk;

	/*
	 * Implementation note (page 633) in PCIe Specification 3.0 suggests
	 * polling the Link Training bit in the Link Status register until the
	 * value returned is 0 before setting the Retrain Link bit to 1.
	 * This is meant to avoid a race condition when using the
	 * Retrain Link mechanism.
	 */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			break;
		udelay(100);
	}
	if (try == 0) {
		printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
		return -1;
	}

	/* Start link retraining */
	lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
	lnk |= PCI_EXP_LNKCTL_RL;
	pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);

	/* Wait for training to complete */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			return 0;
		udelay(100);
	}

	printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
	return -1;
}

/*
 * Check the Slot Clock Configuration for root port and endpoint
 * and enable Common Clock Configuration if possible. If CCC is
 * enabled the link must be retrained.
 */
static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
				       struct device *endp, unsigned int endp_cap)
{
	u16 root_scc, endp_scc, lnkctl;

	/* Get Slot Clock Configuration for root port */
	root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
	root_scc &= PCI_EXP_LNKSTA_SLC;

	/* Get Slot Clock Configuration for endpoint */
	endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
	endp_scc &= PCI_EXP_LNKSTA_SLC;

	/* Enable Common Clock Configuration and retrain */
	if (root_scc && endp_scc) {
		printk(BIOS_INFO, "Enabling Common Clock Configuration\n");

		/* Set in endpoint */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set in root port */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Retrain link if CCC was enabled */
		pciexp_retrain_link(root, root_cap);
	}
}

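/*
 * Enable Clock Power Management in the endpoint's Link Control register,
 * provided its Link Capabilities register advertises support for it.
 */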
static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
{
	/* Check if Clock Power Management is supported by the device */
	u32 endp_ca;
	u16 lnkctl;
	endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
	if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
		printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
		return;
	}
	lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
	lnkctl = lnkctl | PCI_EXP_EN_CLK_PM;
	pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
}

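/*
 * If the device exposes an LTR extended capability, let the root port's PCI
 * ops program the device's maximum latency registers (at the capability
 * offset + 4) through the set_L1_ss_latency() callback.
 */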
static void pciexp_config_max_latency(struct device *root, struct device *dev)
{
	unsigned int cap;
	cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID);
	if ((cap) && (root->ops->ops_pci != NULL) &&
	    (root->ops->ops_pci->set_L1_ss_latency != NULL))
		root->ops->ops_pci->set_L1_ss_latency(dev, cap + 4);
}

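/* Return true if the device advertises LTR support in Device Capabilities 2. */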
static bool pciexp_is_ltr_supported(struct device *dev, unsigned int cap)
{
	unsigned int val;

	val = pci_read_config16(dev, cap + PCI_EXP_DEV_CAP2_OFFSET);

	if (val & LTR_MECHANISM_SUPPORT)
		return true;

	return false;
}

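/*
 * Enable the LTR mechanism in the device's Device Control 2 register and have
 * the upstream root port configure the maximum latency values.
 */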
static void pciexp_configure_ltr(struct device *dev)
{
	unsigned int cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);

	/*
	 * Check if capability pointer is valid and
	 * device supports LTR mechanism.
	 */
	if (!cap || !pciexp_is_ltr_supported(dev, cap)) {
		printk(BIOS_INFO, "Failed to enable LTR for dev = %s\n",
		       dev_path(dev));
		return;
	}

	cap += PCI_EXP_DEV_CTL_STS2_CAP_OFFSET;

	/* Enable LTR for device */
	pci_update_config32(dev, cap, ~LTR_MECHANISM_EN, LTR_MECHANISM_EN);

	/* Configure Max Snoop Latency */
	pciexp_config_max_latency(dev->bus->dev, dev);
}

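/*
 * Walk all buses below the given bridge and enable LTR on every PCI child,
 * recursing into children that are bridges themselves.
 */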
static void pciexp_enable_ltr(struct device *dev)
{
	struct bus *bus;
	struct device *child;

	for (bus = dev->link_list; bus; bus = bus->next) {
		for (child = bus->children; child; child = child->sibling) {
			if (child->path.type != DEVICE_PATH_PCI)
				continue;
			pciexp_configure_ltr(child);
			if (child->ops && child->ops->scan_bus)
				pciexp_enable_ltr(child);
		}
	}
}

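/*
 * Merge the endpoint's L1 Sub-States capabilities (read from endp_cap + 4)
 * into *data: AND the supported-substates mask and keep the larger
 * Common_Mode_Restore_Time and T_POWER_ON value/scale. Returns 0 when no
 * commonly supported substates remain.
 */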
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
					    unsigned int *data)
{
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	if (power_on_value * mult[power_on_scale] <
	    endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}

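/*
 * Merge the L1 Sub-States capabilities of the root port and every endpoint
 * function on the bus, then program the L1 Sub-States control registers on
 * both ends. Bails out if any function lacks L1 Sub-State support.
 */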
static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
				      unsigned int root_cap, unsigned int end_cap)
{
	struct device *dev_t;
	unsigned char L1_ss_ok;
	unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
	unsigned int L1SubStateSupport;
	unsigned int comm_mode_rst_time;
	unsigned int power_on_scale;
	unsigned int endp_power_on_value;

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		/*
		 * rp_L1_support is initialized above from the root port.
		 * It is merged with each endpoint to find the commonly
		 * supported set. If any endpoint doesn't support L1
		 * Sub-States, abort enabling this feature.
		 */
		L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
						  &rp_L1_support);
		if (!L1_ss_ok)
			return;
	}

	L1SubStateSupport = rp_L1_support & 0xf;
	comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
	power_on_scale = (rp_L1_support >> 16) & 0x3;
	endp_power_on_value = (rp_L1_support >> 19) & 0x1f;

	printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
	       root->path.pci.devfn >> 3);
	printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
	printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
	printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
	       endp_power_on_value, power_on_scale);

	pci_update_config32(root, root_cap + 0x08, ~0xff00,
			    (comm_mode_rst_time << 8));

	pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
			    (endp_power_on_value << 3) | (power_on_scale));

	/* TODO: 0xa0, 2 are values that work on some chipsets but really
	 * should be determined dynamically by looking at downstream devices.
	 */
	pci_update_config32(root, root_cap + 0x08,
			    ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
			      ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
			    (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
			    (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

	pci_update_config32(root, root_cap + 0x08, ~0x1f,
			    L1SubStateSupport);

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
				    (endp_power_on_value << 3) | (power_on_scale));

		pci_update_config32(dev_t, end_cap + 0x08,
				    ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
				      ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
				    (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
				    (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

		pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
				    L1SubStateSupport);
	}
}

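/*
 * Enable L1 Sub-States for a root port and the device behind it. Only
 * function 0 of the device is considered; the endpoint's L1 Sub-States
 * capability may alternatively be located via the ID 0xcafe.
 */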
static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
{
	unsigned int root_cap, end_cap;

	/* Do it for function 0 only */
	if (dev->path.pci.devfn & 0x7)
		return;

	root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID);
	if (!root_cap)
		return;

	end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID);
	if (!end_cap) {
		end_cap = pciexp_find_extended_cap(dev, 0xcafe);
		if (!end_cap)
			return;
	}

	pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
}

/*
 * Determine the ASPM L0s or L1 exit latency for a link
 * by checking both root port and endpoint and returning
 * the highest latency value.
 */
static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap,
			       enum aspm_type type)
{
	int root_lat = 0, endp_lat = 0;
	u32 root_lnkcap, endp_lnkcap;

	root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
	endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);

	/* Make sure the link supports this ASPM type by checking
	 * capability bits 11:10 with aspm_type offset by 1 */
	if (!(root_lnkcap & (1 << (type + 9))) ||
	    !(endp_lnkcap & (1 << (type + 9))))
		return -1;

	/* Find the one with higher latency */
	switch (type) {
	case PCIE_ASPM_L0S:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		break;
	case PCIE_ASPM_L1:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		break;
	default:
		return -1;
	}

	return (endp_lat > root_lat) ? endp_lat : root_lat;
}

/*
 * Enable ASPM on PCIe root port and endpoint.
 */
static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap)
{
	const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
	enum aspm_type apmc = PCIE_ASPM_NONE;
	int exit_latency, ok_latency;
	u16 lnkctl;
	u32 devcap;

	if (endp->disable_pcie_aspm)
		return;

	/* Get endpoint device capabilities for acceptable limits */
	devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);

	/* Enable L0s if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L0S);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L0S;

	/* Enable L1 if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L1);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L1;

	if (apmc != PCIE_ASPM_NONE) {
		/* Set APMC in root port first */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set APMC in endpoint device next */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
	}

	printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
}

/*
 * Set max payload size of endpoint in accordance with max payload size of root port.
 */
static void pciexp_set_max_payload_size(struct device *root, unsigned int root_cap,
					struct device *endp, unsigned int endp_cap)
{
	unsigned int endp_max_payload, root_max_payload, max_payload;
	u16 endp_devctl, root_devctl;
	u32 endp_devcap, root_devcap;

	/* Get max payload size supported by endpoint */
	endp_devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
	endp_max_payload = endp_devcap & PCI_EXP_DEVCAP_PAYLOAD;

	/* Get max payload size supported by root port */
	root_devcap = pci_read_config32(root, root_cap + PCI_EXP_DEVCAP);
	root_max_payload = root_devcap & PCI_EXP_DEVCAP_PAYLOAD;

	/* Set max payload to the smaller of the reported device capabilities. */
	max_payload = MIN(endp_max_payload, root_max_payload);
	if (max_payload > 5) {
		/* Values 6 and 7 are reserved in PCIe 3.0 specs. */
		printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
		       max_payload);
		max_payload = 5;
	}

	endp_devctl = pci_read_config16(endp, endp_cap + PCI_EXP_DEVCTL);
	endp_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
	endp_devctl |= max_payload << 5;
	pci_write_config16(endp, endp_cap + PCI_EXP_DEVCTL, endp_devctl);

	root_devctl = pci_read_config16(root, root_cap + PCI_EXP_DEVCTL);
	root_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
	root_devctl |= max_payload << 5;
	pci_write_config16(root, root_cap + PCI_EXP_DEVCTL, root_devctl);

	printk(BIOS_INFO, "PCIe: Max_Payload_Size adjusted to %d\n", (1 << (max_payload + 7)));
}

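/*
 * Tune a PCIe device and its upstream root port: depending on the Kconfig
 * options, enable Common Clock, Clock Power Management, L1 Sub-States and
 * ASPM, and adjust the Max_Payload_Size of both link partners.
 */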
static void pciexp_tune_dev(struct device *dev)
{
	struct device *root = dev->bus->dev;
	unsigned int root_cap, cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
	if (!root_cap)
		return;

	/* Check for and enable Common Clock */
	if (CONFIG(PCIEXP_COMMON_CLOCK))
		pciexp_enable_common_clock(root, root_cap, dev, cap);

	/* Check if per-port CLK req is supported by the endpoint */
	if (CONFIG(PCIEXP_CLK_PM))
		pciexp_enable_clock_power_pm(dev, cap);

	/* Enable L1 Sub-State when both root port and endpoint support it */
	if (CONFIG(PCIEXP_L1_SUB_STATE))
		pciexp_config_L1_sub_state(root, dev);

	/* Check for and enable ASPM */
	if (CONFIG(PCIEXP_ASPM))
		pciexp_enable_aspm(root, root_cap, dev, cap);

	/* Adjust Max_Payload_Size of link ends. */
	pciexp_set_max_payload_size(root, root_cap, dev, cap);
}

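/*
 * Scan a PCIe bus for devices within the given devfn window and apply the
 * PCIe tuning above to each device found.
 */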
void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
		     unsigned int max_devfn)
{
	struct device *child;
	pci_scan_bus(bus, min_devfn, max_devfn);

	for (child = bus->children; child; child = child->sibling) {
		if (child->path.type != DEVICE_PATH_PCI)
			continue;
		if ((child->path.pci.devfn < min_devfn) ||
		    (child->path.pci.devfn > max_devfn)) {
			continue;
		}
		pciexp_tune_dev(child);
	}
}

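/* Scan the bus behind a PCIe bridge, then enable LTR on the devices below it. */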
void pciexp_scan_bridge(struct device *dev)
{
	do_pci_scan_bridge(dev, pciexp_scan_bus);
	pciexp_enable_ltr(dev);
}

/** Default device operations for PCI Express bridges */
static struct pci_operations pciexp_bus_ops_pci = {
	.set_subsystem = 0,
};

struct device_operations default_pciexp_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};

#if CONFIG(PCIEXP_HOTPLUG)

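/*
 * The dummy device created below requests the extra memory, prefetchable
 * memory and I/O windows configured via Kconfig, so that the resource
 * allocator keeps room behind a hotplug-capable bridge for devices plugged
 * in at runtime.
 */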
static void pciexp_hotplug_dummy_read_resources(struct device *dev)
{
	struct resource *resource;

	/* Add extra memory space */
	resource = new_resource(dev, 0x10);
	resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffffffff;
	resource->flags |= IORESOURCE_MEM;

	/* Add extra prefetchable memory space */
	resource = new_resource(dev, 0x14);
	resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffffffffffffffff;
	resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;

	/* Set resource flag requesting allocation above 4G boundary. */
	if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
		resource->flags |= IORESOURCE_ABOVE_4G;

	/* Add extra I/O space */
	resource = new_resource(dev, 0x18);
	resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffff;
	resource->flags |= IORESOURCE_IO;
}

static struct device_operations pciexp_hotplug_dummy_ops = {
	.read_resources = pciexp_hotplug_dummy_read_resources,
};

void pciexp_hotplug_scan_bridge(struct device *dev)
{
	dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;

	/* Normal PCIe Scan */
	pciexp_scan_bridge(dev);

	/* Add dummy slot to preserve resources, must happen after bus scan */
	struct device *dummy;
	struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
	dummy = alloc_dev(dev->link_list, &dummy_path);
	dummy->ops = &pciexp_hotplug_dummy_ops;
}

struct device_operations default_pciexp_hotplug_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_hotplug_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};
#endif /* CONFIG(PCIEXP_HOTPLUG) */