/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <commonlib/helpers.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <device/pciexp.h>

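/*
 * PCIe extended capabilities live in extended config space (offset 0x100 and
 * up). Each capability starts with a 32-bit header: bits 15:0 hold the
 * capability ID and bits 31:20 hold the offset of the next capability
 * (0 terminates the list). The helpers below decode that header.
 */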
static unsigned int ext_cap_id(unsigned int cap)
{
	return cap & 0xffff;
}

static unsigned int ext_cap_next_offset(unsigned int cap)
{
	return cap >> 20 & 0xffc;
}

static unsigned int find_ext_cap_offset(const struct device *dev, unsigned int cap_id,
					unsigned int offset)
{
	unsigned int this_cap_offset = offset;

	while (this_cap_offset >= PCIE_EXT_CAP_OFFSET) {
		const unsigned int this_cap = pci_read_config32(dev, this_cap_offset);

		/* Bail out when this request is unsupported */
		if (this_cap == 0xffffffff)
			break;

		if (ext_cap_id(this_cap) == cap_id)
			return this_cap_offset;

		this_cap_offset = ext_cap_next_offset(this_cap);
	}

	return 0;
}

/*
 * Search for an extended capability with the ID `cap`.
 *
 * Returns the offset of the first matching extended
 * capability if found, or 0 otherwise.
 *
 * A new search is started with `offset == 0`.
 * To continue a search, the prior return value
 * should be passed as `offset`.
 */
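/*
 * Example (sketch): walking every instance of a capability, e.g. when a
 * device exposes several Vendor-Specific Extended Capabilities:
 *
 *   unsigned int offset = 0;
 *   while ((offset = pciexp_find_extended_cap(dev, PCI_EXT_CAP_ID_VNDR, offset)))
 *       printk(BIOS_DEBUG, "VSEC found at offset 0x%x\n", offset);
 */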
unsigned int pciexp_find_extended_cap(const struct device *dev, unsigned int cap,
				      unsigned int offset)
{
	unsigned int next_cap_offset;

	if (offset)
		next_cap_offset = ext_cap_next_offset(pci_read_config32(dev, offset));
	else
		next_cap_offset = PCIE_EXT_CAP_OFFSET;

	return find_ext_cap_offset(dev, cap, next_cap_offset);
}

/*
 * Search for a vendor-specific extended capability,
 * with the vendor-specific ID `cap`.
 *
 * Returns the offset of the vendor-specific header,
 * i.e. the offset of the extended capability + 4,
 * or 0 if none is found.
 *
 * A new search is started with `offset == 0`.
 * To continue a search, the prior return value
 * should be passed as `offset`.
 */
unsigned int pciexp_find_ext_vendor_cap(const struct device *dev, unsigned int cap,
					unsigned int offset)
{
	/* Reconstruct capability offset from vendor-specific header offset. */
	if (offset >= 4)
		offset -= 4;

	for (;;) {
		offset = pciexp_find_extended_cap(dev, PCI_EXT_CAP_ID_VNDR, offset);
		if (!offset)
			return 0;

		const unsigned int vndr_cap = pci_read_config32(dev, offset + 4);
		if ((vndr_cap & 0xffff) == cap)
			return offset + 4;
	}
}

/**
 * Find a PCIe device with a given serial number, and a given VID if applicable
 *
 * @param serial The serial number of the device.
 * @param vid Vendor ID of the device, may be 0 if not applicable.
 * @param from Pointer to the device structure, used as a starting point in
 *             the linked list of all_devices, which can be 0 to start at the
 *             head of the list (i.e. all_devices).
 * @return Pointer to the device struct.
 */
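/*
 * Example (sketch): iterate over all devices matching a given DSN, regardless
 * of vendor (the serial number below is only a placeholder):
 *
 *   struct device *dev = NULL;
 *   while ((dev = pcie_find_dsn(0x0011223344556677ULL, 0, dev)) != NULL)
 *       printk(BIOS_DEBUG, "DSN match at %s\n", dev_path(dev));
 */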
struct device *pcie_find_dsn(const uint64_t serial, const uint16_t vid,
			     struct device *from)
{
	union dsn {
		struct {
			uint32_t dsn_low;
			uint32_t dsn_high;
		};
		uint64_t dsn;
	} dsn;
	unsigned int cap;
	uint16_t vendor_id;

	if (!from)
		from = all_devices;
	else
		from = from->next;

	while (from) {
		if (from->path.type == DEVICE_PATH_PCI) {
			cap = pciexp_find_extended_cap(from, PCI_EXT_CAP_ID_DSN, 0);
			/*
			 * For a PCIe device, find the extended capability for the
			 * serial number. The capability header is 4 bytes, followed
			 * by the lower 4 bytes of the serial number, then the upper
			 * 4 bytes of the serial number.
			 */
			if (cap != 0) {
				dsn.dsn_low = pci_read_config32(from, cap + 4);
				dsn.dsn_high = pci_read_config32(from, cap + 8);
				vendor_id = pci_read_config16(from, PCI_VENDOR_ID);
				if ((dsn.dsn == serial) && (vid == 0 || vendor_id == vid))
					return from;
			}
		}

		from = from->next;
	}

	return from;
}

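/*
 * Check the Device/Port Type field of the PCI Express Capabilities register
 * to tell whether `dev` is a PCIe root port.
 */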
static bool pcie_is_root_port(struct device *dev)
{
	unsigned int pcie_pos, pcie_type;

	pcie_pos = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!pcie_pos)
		return false;

	pcie_type = pci_read_config16(dev, pcie_pos + PCI_EXP_FLAGS) & PCI_EXP_FLAGS_TYPE;
	pcie_type >>= 4;

	return (pcie_type == PCI_EXP_TYPE_ROOT_PORT);
}

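/*
 * Check the Device/Port Type field of the PCI Express Capabilities register
 * to tell whether `dev` is a PCIe (or legacy PCIe) endpoint.
 */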
static bool pcie_is_endpoint(struct device *dev)
{
	unsigned int pcie_pos, pcie_type;

	pcie_pos = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!pcie_pos)
		return false;

	pcie_type = pci_read_config16(dev, pcie_pos + PCI_EXP_FLAGS) & PCI_EXP_FLAGS_TYPE;
	pcie_type >>= 4;

	return ((pcie_type == PCI_EXP_TYPE_ENDPOINT) || (pcie_type == PCI_EXP_TYPE_LEG_END));
}

/*
 * Re-train a PCIe link
 */
#define PCIE_TRAIN_RETRY 10000
static int pciexp_retrain_link(struct device *dev, unsigned int cap)
{
	unsigned int try;
	u16 lnk;

	/*
	 * Implementation note (page 633) in PCIe Specification 3.0 suggests
	 * polling the Link Training bit in the Link Status register until the
	 * value returned is 0 before setting the Retrain Link bit to 1.
	 * This is meant to avoid a race condition when using the
	 * Retrain Link mechanism.
	 */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			break;
		udelay(100);
	}
	if (try == 0) {
		printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
		return -1;
	}

	/* Start link retraining */
	lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
	lnk |= PCI_EXP_LNKCTL_RL;
	pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);

	/* Wait for training to complete */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			return 0;
		udelay(100);
	}

	printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
	return -1;
}

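/*
 * Return true if Common Clock Configuration is already enabled on both
 * ends of the link.
 */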
static bool pciexp_is_ccc_active(struct device *root, unsigned int root_cap,
				 struct device *endp, unsigned int endp_cap)
{
	u16 root_ccc, endp_ccc;

	root_ccc = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_CCC;
	endp_ccc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_CCC;
	if (root_ccc && endp_ccc) {
		printk(BIOS_INFO, "PCIe: Common Clock Configuration already enabled\n");
		return true;
	}
	return false;
}

/*
 * Check the Slot Clock Configuration for root port and endpoint
 * and enable Common Clock Configuration if possible. If CCC is
 * enabled the link must be retrained.
 */
static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
				       struct device *endp, unsigned int endp_cap)
{
	u16 root_scc, endp_scc, lnkctl;

	/* No need to enable common clock if it is already active. */
	if (pciexp_is_ccc_active(root, root_cap, endp, endp_cap))
		return;

	/* Get Slot Clock Configuration for root port */
	root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
	root_scc &= PCI_EXP_LNKSTA_SLC;

	/* Get Slot Clock Configuration for endpoint */
	endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
	endp_scc &= PCI_EXP_LNKSTA_SLC;

	/* Enable Common Clock Configuration and retrain */
	if (root_scc && endp_scc) {
		printk(BIOS_INFO, "Enabling Common Clock Configuration\n");

		/* Set in endpoint */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set in root port */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Retrain link if CCC was enabled */
		pciexp_retrain_link(root, root_cap);
	}
}

static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
{
	/* Check whether Clock Power Management (per-port CLKREQ#) is supported by the endpoint */
	u32 endp_ca;
	u16 lnkctl;
	endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
	if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
		printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
		return;
	}
	lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
	lnkctl = lnkctl | PCI_EXP_EN_CLK_PM;
	pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
}

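/*
 * LTR (Latency Tolerance Reporting) helpers: support is advertised in
 * Device Capabilities 2, and the feature is enabled through Device
 * Control 2 of the PCIe capability.
 */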
static bool _pciexp_ltr_supported(struct device *dev, unsigned int cap)
{
	return pci_read_config16(dev, cap + PCI_EXP_DEVCAP2) & PCI_EXP_DEVCAP2_LTR;
}

static bool _pciexp_ltr_enabled(struct device *dev, unsigned int cap)
{
	return pci_read_config16(dev, cap + PCI_EXP_DEVCTL2) & PCI_EXP_DEV2_LTR;
}

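/*
 * Enable LTR on `dev`. If a `parent` is given, LTR is only enabled when the
 * upstream port already supports and has LTR enabled, so the feature is
 * switched on from the root downwards.
 */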
static bool _pciexp_enable_ltr(struct device *parent, unsigned int parent_cap,
			       struct device *dev, unsigned int cap)
{
	if (!_pciexp_ltr_supported(dev, cap)) {
		printk(BIOS_DEBUG, "%s: No LTR support\n", dev_path(dev));
		return false;
	}

	if (_pciexp_ltr_enabled(dev, cap))
		return true;

	if (parent &&
	    (!_pciexp_ltr_supported(parent, parent_cap) ||
	     !_pciexp_ltr_enabled(parent, parent_cap)))
		return false;

	pci_or_config16(dev, cap + PCI_EXP_DEVCTL2, PCI_EXP_DEV2_LTR);
	printk(BIOS_INFO, "%s: Enabled LTR\n", dev_path(dev));
	return true;
}

static void pciexp_enable_ltr(struct device *dev)
{
	const unsigned int cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	/*
	 * If we have get_ltr_max_latencies(), treat `dev` as the root.
	 * If not, let _pciexp_enable_ltr() query the parent's state.
	 */
	struct device *parent = NULL;
	unsigned int parent_cap = 0;
	if (!dev->ops->ops_pci || !dev->ops->ops_pci->get_ltr_max_latencies) {
		parent = dev->upstream->dev;
		if (parent->path.type != DEVICE_PATH_PCI)
			return;
		parent_cap = pci_find_capability(parent, PCI_CAP_ID_PCIE);
		if (!parent_cap)
			return;
	}

	(void)_pciexp_enable_ltr(parent, parent_cap, dev, cap);
}

bool pciexp_get_ltr_max_latencies(struct device *dev, u16 *max_snoop, u16 *max_nosnoop)
{
	/* Walk the hierarchy up to find get_ltr_max_latencies(). */
	do {
		if (dev->ops->ops_pci && dev->ops->ops_pci->get_ltr_max_latencies)
			break;
		if (dev->upstream->dev == dev || dev->upstream->dev->path.type != DEVICE_PATH_PCI)
			return false;
		dev = dev->upstream->dev;
	} while (true);

	dev->ops->ops_pci->get_ltr_max_latencies(max_snoop, max_nosnoop);
	return true;
}

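/*
 * Enable LTR on a device and, if it exposes an LTR extended capability,
 * program the platform's maximum snoop/no-snoop latencies into it.
 */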
static void pciexp_configure_ltr(struct device *parent, unsigned int parent_cap,
				 struct device *dev, unsigned int cap)
{
	if (!_pciexp_enable_ltr(parent, parent_cap, dev, cap))
		return;

	const unsigned int ltr_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID, 0);
	if (!ltr_cap)
		return;

	u16 max_snoop, max_nosnoop;
	if (!pciexp_get_ltr_max_latencies(dev, &max_snoop, &max_nosnoop))
		return;

	pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_SNOOP, max_snoop);
	pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_NOSNOOP, max_nosnoop);
	printk(BIOS_INFO, "%s: Programmed LTR max latencies\n", dev_path(dev));
}

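/*
 * Merge the endpoint's L1 PM Substates capability data into `*data`
 * (initially the root port's values): AND the supported-substates bits and
 * keep the larger Common_Mode_Restore_Time and T_POWER_ON values.
 * Returns 0 if no commonly supported substates remain.
 */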
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
					    unsigned int *data)
{
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	if (power_on_value * mult[power_on_scale] <
	    endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}

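/*
 * Program the negotiated L1 PM Substates parameters into the root port and
 * into the endpoint device and its sibling functions on the bus.
 */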
static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
				      unsigned int root_cap, unsigned int end_cap)
{
	struct device *dev_t;
	unsigned char L1_ss_ok;
	unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
	unsigned int L1SubStateSupport;
	unsigned int comm_mode_rst_time;
	unsigned int power_on_scale;
	unsigned int endp_power_on_value;

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		/*
		 * rp_L1_support is initialized above from the root port.
		 * It needs to be reconciled with each endpoint to arrive at a
		 * common set of values. If any endpoint does not support L1
		 * Sub-States, abort enabling this feature.
		 */
		L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
						  &rp_L1_support);
		if (!L1_ss_ok)
			return;
	}

	L1SubStateSupport = rp_L1_support & 0xf;
	comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
	power_on_scale = (rp_L1_support >> 16) & 0x3;
	endp_power_on_value = (rp_L1_support >> 19) & 0x1f;

	printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
	       root->path.pci.devfn >> 3);
	printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
	printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
	printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
	       endp_power_on_value, power_on_scale);

	pci_update_config32(root, root_cap + 0x08, ~0xff00,
			    (comm_mode_rst_time << 8));

	pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
			    (endp_power_on_value << 3) | (power_on_scale));

	/* TODO: 0xa0, 2 are values that work on some chipsets but really
	 * should be determined dynamically by looking at downstream devices.
	 */
	pci_update_config32(root, root_cap + 0x08,
			    ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
			      ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
			    (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
			    (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

	pci_update_config32(root, root_cap + 0x08, ~0x1f,
			    L1SubStateSupport);

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
				    (endp_power_on_value << 3) | (power_on_scale));

		pci_update_config32(dev_t, end_cap + 0x08,
				    ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
				      ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
				    (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
				    (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

		pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
				    L1SubStateSupport);
	}
}

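/*
 * Enable L1 PM Substates for a root port and its downstream device, using
 * the device's L1SS extended capability or, on Intel devices, a
 * vendor-specific capability as a fallback.
 */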
static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
{
	unsigned int root_cap, end_cap;

	/* Do it for function 0 only */
	if (dev->path.pci.devfn & 0x7)
		return;

	root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID, 0);
	if (!root_cap)
		return;

	end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID, 0);
	if (!end_cap) {
		if (dev->vendor != PCI_VID_INTEL)
			return;

		end_cap = pciexp_find_ext_vendor_cap(dev, 0xcafe, 0);
		if (!end_cap)
			return;
	}

	pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
}

/*
 * Determine the ASPM L0s or L1 exit latency for a link
 * by checking both root port and endpoint and returning
 * the highest latency value.
 */
static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap,
			       enum aspm_type type)
{
	int root_lat = 0, endp_lat = 0;
	u32 root_lnkcap, endp_lnkcap;

	root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
	endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);

	/* Make sure the link supports this ASPM type by checking
	 * capability bits 11:10 with aspm_type offset by 1 */
	if (!(root_lnkcap & (1 << (type + 9))) ||
	    !(endp_lnkcap & (1 << (type + 9))))
		return -1;

	/* Find the one with higher latency */
	switch (type) {
	case PCIE_ASPM_L0S:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		break;
	case PCIE_ASPM_L1:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		break;
	default:
		return -1;
	}

	return (endp_lat > root_lat) ? endp_lat : root_lat;
}

/*
 * Enable ASPM on PCIe root port and endpoint.
 */
static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap)
{
	const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
	enum aspm_type apmc = PCIE_ASPM_NONE;
	int exit_latency, ok_latency;
	u16 lnkctl;
	u32 devcap;

	if (endp->disable_pcie_aspm)
		return;

	/* Get endpoint device capabilities for acceptable limits */
	devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);

	/* Enable L0s if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L0S);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L0S;

	/* Enable L1 if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L1);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L1;

	if (apmc != PCIE_ASPM_NONE) {
		/* Set APMC in root port first */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set APMC in endpoint device next */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
	}

	printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
}

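/*
 * Max_Payload_Size is encoded in Device Control bits 7:5:
 * 0 = 128B, 1 = 256B, ..., 5 = 4096B (values 6 and 7 are reserved).
 */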
static void pciexp_dev_set_max_payload_size(struct device *dev, unsigned int max_payload)
{
	u16 devctl;
	unsigned int pcie_cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);

	if (!pcie_cap)
		return;

	devctl = pci_read_config16(dev, pcie_cap + PCI_EXP_DEVCTL);
	devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
	/*
	 * Should never overflow to higher bits, due to how max_payload is
	 * guarded in this file.
	 */
	devctl |= max_payload << 5;
	pci_write_config16(dev, pcie_cap + PCI_EXP_DEVCTL, devctl);
}

static unsigned int pciexp_dev_get_current_max_payload_size(struct device *dev)
{
	u16 devctl;
	unsigned int pcie_cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);

	if (!pcie_cap)
		return 0;

	devctl = pci_read_config16(dev, pcie_cap + PCI_EXP_DEVCTL);
	devctl &= PCI_EXP_DEVCTL_PAYLOAD;
	return (devctl >> 5);
}

static unsigned int pciexp_dev_get_max_payload_size_cap(struct device *dev)
{
	u16 devcap;
	unsigned int pcie_cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);

	if (!pcie_cap)
		return 0;

	devcap = pci_read_config16(dev, pcie_cap + PCI_EXP_DEVCAP);
	return (devcap & PCI_EXP_DEVCAP_PAYLOAD);
}

/*
 * Set max payload size of a parent based on max payload size capability of the child.
 */
static void pciexp_configure_max_payload_size(struct device *parent, struct device *child)
{
	unsigned int child_max_payload, parent_max_payload, max_payload;

	/* Get max payload size supported by child */
	child_max_payload = pciexp_dev_get_current_max_payload_size(child);
	/* Get max payload size configured by parent */
	parent_max_payload = pciexp_dev_get_current_max_payload_size(parent);
	/* Set max payload to smaller of the reported device capability or parent config. */
	max_payload = MIN(child_max_payload, parent_max_payload);

	if (max_payload > 5) {
		/* Values 6 and 7 are reserved in PCIe 3.0 specs. */
		printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
		       max_payload);
		max_payload = 5;
	}

	if (max_payload != parent_max_payload) {
		pciexp_dev_set_max_payload_size(parent, max_payload);
		printk(BIOS_INFO, "%s: Max_Payload_Size adjusted to %d\n", dev_path(parent),
		       (1 << (max_payload + 7)));
	}
}

/*
 * Clear the Lane Error Status at the end of PCIe link training.
 * Lane error status is only cleared if PCIEXP_LANE_ERR_STAT_CLEAR is set.
 * Lane errors are normal during link training, so they need to be cleared.
 * At this point the link has been used, but only for a very short duration.
 */
static void clear_lane_error_status(struct device *dev)
{
	u32 reg32;
	u16 pos;

	pos = pciexp_find_extended_cap(dev, PCI_EXP_SEC_CAP_ID, 0);
	if (pos == 0)
		return;

	reg32 = pci_read_config32(dev, pos + PCI_EXP_SEC_LANE_ERR_STATUS);
	if (reg32 == 0)
		return;

	printk(BIOS_DEBUG, "%s: Clear Lane Error Status.\n", dev_path(dev));
	printk(BIOS_DEBUG, "LaneErrStat:0x%x\n", reg32);
	pci_write_config32(dev, pos + PCI_EXP_SEC_LANE_ERR_STATUS, reg32);
}

static void pciexp_tune_dev(struct device *dev)
{
	struct device *root = dev->upstream->dev;
	unsigned int root_cap, cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
	if (!root_cap)
		return;

	/* Check for and enable Common Clock */
	if (CONFIG(PCIEXP_COMMON_CLOCK))
		pciexp_enable_common_clock(root, root_cap, dev, cap);

	/* Check if per-port CLKREQ# is supported by the endpoint */
	if (CONFIG(PCIEXP_CLK_PM))
		pciexp_enable_clock_power_pm(dev, cap);

	/* Enable L1 Sub-States when both root port and endpoint support them */
	if (CONFIG(PCIEXP_L1_SUB_STATE))
		pciexp_config_L1_sub_state(root, dev);

	/* Check for and enable ASPM */
	if (CONFIG(PCIEXP_ASPM))
		pciexp_enable_aspm(root, root_cap, dev, cap);

	/* Clear PCIe Lane Error Status */
	if (CONFIG(PCIEXP_LANE_ERR_STAT_CLEAR))
		clear_lane_error_status(root);

	/* Set the Max Payload Size to the maximum supported capability for this device */
	if (pcie_is_endpoint(dev))
		pciexp_dev_set_max_payload_size(dev, pciexp_dev_get_max_payload_size_cap(dev));

	/* Limit the parent's Max Payload Size if needed */
	pciexp_configure_max_payload_size(root, dev);

	pciexp_configure_ltr(root, root_cap, dev, cap);
}

static void pciexp_sync_max_payload_size(struct bus *bus, unsigned int max_payload)
{
	struct device *child;

	/* Set the max payload for children on the bus and their children, etc. */
	for (child = bus->children; child; child = child->sibling) {
		if (!is_pci(child))
			continue;

		pciexp_dev_set_max_payload_size(child, max_payload);

		if (child->downstream)
			pciexp_sync_max_payload_size(child->downstream, max_payload);
	}
}

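/*
 * Scan a PCIe bridge: enable LTR on the bridge, program its Max_Payload_Size
 * capability, scan the bus, tune every child device (common clock, clock PM,
 * L1 substates, ASPM, MPS, LTR) and finally, for root ports, propagate a
 * common Max_Payload_Size to the whole subtree.
 */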
void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
		     unsigned int max_devfn)
{
	struct device *child;
	unsigned int max_payload;

	pciexp_enable_ltr(bus->dev);

	/*
	 * Set the Max Payload Size to the maximum supported capability for this bridge.
	 * This value will be used in pciexp_tune_dev to limit the Max Payload size if needed.
	 */
	max_payload = pciexp_dev_get_max_payload_size_cap(bus->dev);
	pciexp_dev_set_max_payload_size(bus->dev, max_payload);

	pci_scan_bus(bus, min_devfn, max_devfn);

	for (child = bus->children; child; child = child->sibling) {
		if (child->path.type != DEVICE_PATH_PCI)
			continue;
		if ((child->path.pci.devfn < min_devfn) ||
		    (child->path.pci.devfn > max_devfn)) {
			continue;
		}
		pciexp_tune_dev(child);
	}

	/*
	 * Now the root port's Max Payload Size should be set to the highest
	 * possible value supported by all devices under a given root port.
	 * Propagate that value down from the root port to all devices, so the
	 * Max Payload Size is equal on all devices, as some devices may have
	 * different capabilities and the programmed value depends on the
	 * order of device population in the subtree.
	 */
	if (pcie_is_root_port(bus->dev)) {
		max_payload = pciexp_dev_get_current_max_payload_size(bus->dev);

		printk(BIOS_INFO, "%s: Setting Max_Payload_Size to %d for devices under this"
		       " root port\n", dev_path(bus->dev), 1 << (max_payload + 7));

		pciexp_sync_max_payload_size(bus, max_payload);
	}
}

void pciexp_scan_bridge(struct device *dev)
{
	do_pci_scan_bridge(dev, pciexp_scan_bus);
}

/** Default device operations for PCI Express bridges */
static struct pci_operations pciexp_bus_ops_pci = {
	.set_subsystem = 0,
};

struct device_operations default_pciexp_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};

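/*
 * Dummy device on a hotplug-capable bridge: reserves extra memory,
 * prefetchable memory and I/O windows so that resources are available
 * for devices hot-added later.
 */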
static void pciexp_hotplug_dummy_read_resources(struct device *dev)
{
	struct resource *resource;

	/* Add extra memory space */
	resource = new_resource(dev, 0x10);
	resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffffffff;
	resource->flags |= IORESOURCE_MEM;

	/* Add extra prefetchable memory space */
	resource = new_resource(dev, 0x14);
	resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffffffffffffffff;
	resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;

	/* Set resource flag requesting allocation above 4G boundary. */
	if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
		resource->flags |= IORESOURCE_ABOVE_4G;

	/* Add extra I/O space */
	resource = new_resource(dev, 0x18);
	resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffff;
	resource->flags |= IORESOURCE_IO;
}

static struct device_operations pciexp_hotplug_dummy_ops = {
	.read_resources = pciexp_hotplug_dummy_read_resources,
	.set_resources = noop_set_resources,
};

void pciexp_hotplug_scan_bridge(struct device *dev)
{
	dev->hotplug_port = 1;
	dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;

	/* Normal PCIe Scan */
	pciexp_scan_bridge(dev);

	/* Add dummy slot to preserve resources, must happen after bus scan */
	struct device *dummy;
	struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
	dummy = alloc_dev(dev->downstream, &dummy_path);
	dummy->ops = &pciexp_hotplug_dummy_ops;
}

struct device_operations default_pciexp_hotplug_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_hotplug_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};