blob: db351efd49f257304a5b2bab6195347b021bdd7e [file] [log] [blame]
Angel Ponsc74dae92020-04-02 23:48:16 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00002
3#include <console/console.h>
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +02004#include <commonlib/helpers.h>
Duncan Laurie90dcdd42011-10-25 14:15:11 -07005#include <delay.h>
Yinghai Lu13f1c2a2005-07-08 02:49:49 +00006#include <device/device.h>
7#include <device/pci.h>
Nico Huberbba97352022-08-05 13:09:25 +02008#include <device/pci_ids.h>
Patrick Rudolphe56189c2018-04-18 10:11:59 +02009#include <device/pci_ops.h>
Yinghai Lu13f1c2a2005-07-08 02:49:49 +000010#include <device/pciexp.h>
11
/* Extract the capability ID (bits 15:0) of an extended capability header. */
static unsigned int ext_cap_id(unsigned int cap)
{
	return cap % 0x10000;
}
16
/*
 * Extract the next-capability pointer (bits 31:20) of an extended capability
 * header. The two low bits are masked off so the result is DWORD aligned.
 */
static unsigned int ext_cap_next_offset(unsigned int cap)
{
	const unsigned int ptr = cap >> 20;

	return ptr & ~0x3u & 0xfffu;
}
21
/*
 * Walk the extended capability list starting at `offset`, returning the
 * offset of the first capability whose ID matches `cap_id`, or 0 if none
 * is found before the list terminates.
 */
static unsigned int find_ext_cap_offset(const struct device *dev, unsigned int cap_id,
					unsigned int offset)
{
	unsigned int this_cap_offset = offset;

	/* Extended capabilities live at/above PCIE_EXT_CAP_OFFSET; a next
	   pointer below that (e.g. 0) terminates the list. */
	while (this_cap_offset >= PCIE_EXT_CAP_OFFSET) {
		const unsigned int this_cap = pci_read_config32(dev, this_cap_offset);

		/* Bail out when this request is unsupported */
		if (this_cap == 0xffffffff)
			break;

		if (ext_cap_id(this_cap) == cap_id)
			return this_cap_offset;

		this_cap_offset = ext_cap_next_offset(this_cap);
	}

	return 0;
}
Kenji Chen31c6e632014-10-04 01:14:44 +080042
/*
 * Search for an extended capability with the ID `cap`.
 *
 * Returns the offset of the first matching extended
 * capability if found, or 0 otherwise.
 *
 * A new search is started with `offset == 0`.
 * To continue a search, the prior return value
 * should be passed as `offset`.
 */
unsigned int pciexp_find_extended_cap(const struct device *dev, unsigned int cap,
				      unsigned int offset)
{
	unsigned int next_cap_offset;

	/* When continuing, resume from the next pointer of the capability at
	   `offset`; otherwise start at the head of the extended list. */
	if (offset)
		next_cap_offset = ext_cap_next_offset(pci_read_config32(dev, offset));
	else
		next_cap_offset = PCIE_EXT_CAP_OFFSET;

	return find_ext_cap_offset(dev, cap, next_cap_offset);
}
65
/*
 * Search for a vendor-specific extended capability,
 * with the vendor-specific ID `cap`.
 *
 * Returns the offset of the vendor-specific header,
 * i.e. the offset of the extended capability + 4,
 * or 0 if none is found.
 *
 * A new search is started with `offset == 0`.
 * To continue a search, the prior return value
 * should be passed as `offset`.
 */
unsigned int pciexp_find_ext_vendor_cap(const struct device *dev, unsigned int cap,
					unsigned int offset)
{
	/* Reconstruct capability offset from vendor-specific header offset. */
	if (offset >= 4)
		offset -= 4;

	for (;;) {
		offset = pciexp_find_extended_cap(dev, PCI_EXT_CAP_ID_VNDR, offset);
		if (!offset)
			return 0;

		/* The vendor-specific header follows the capability header;
		   its low 16 bits hold the vendor-defined ID. */
		const unsigned int vndr_cap = pci_read_config32(dev, offset + 4);
		if ((vndr_cap & 0xffff) == cap)
			return offset + 4;
	}
}
Tim Wawrzynczak3d121ae12021-09-16 20:18:16 -060095
/**
 * Find a PCIe device with a given serial number, and a given VID if applicable
 *
 * @param serial The serial number of the device.
 * @param vid Vendor ID of the device, may be 0 if not applicable.
 * @param from Pointer to the device structure, used as a starting point in
 *             the linked list of all_devices, which can be 0 to start at the
 *             head of the list (i.e. all_devices).
 * @return Pointer to the device struct, or NULL when no device matches.
 */
struct device *pcie_find_dsn(const uint64_t serial, const uint16_t vid,
			     struct device *from)
{
	/* Union lets the two 32-bit DSN register reads be compared as one
	   64-bit value. */
	union dsn {
		struct {
			uint32_t dsn_low;
			uint32_t dsn_high;
		};
		uint64_t dsn;
	} dsn;
	unsigned int cap;
	uint16_t vendor_id;

	if (!from)
		from = all_devices;
	else
		from = from->next;	/* Resume search after the given device. */

	while (from) {
		if (from->path.type == DEVICE_PATH_PCI) {
			cap = pciexp_find_extended_cap(from, PCI_EXT_CAP_ID_DSN, 0);
			/*
			 * For PCIe device, find extended capability for serial number.
			 * The capability header is 4 bytes, followed by lower 4 bytes
			 * of serial number, then higher 4 bytes of serial number.
			 */
			if (cap != 0) {
				dsn.dsn_low = pci_read_config32(from, cap + 4);
				dsn.dsn_high = pci_read_config32(from, cap + 8);
				vendor_id = pci_read_config16(from, PCI_VENDOR_ID);
				if ((dsn.dsn == serial) && (vid == 0 || vendor_id == vid))
					return from;
			}
		}

		from = from->next;
	}

	/* `from` is NULL here: nothing matched. */
	return from;
}
146
Michał Żygowski9f0443c2024-01-31 13:09:37 +0100147static bool pcie_is_root_port(struct device *dev)
148{
149 unsigned int pcie_pos, pcie_type;
150
151 pcie_pos = pci_find_capability(dev, PCI_CAP_ID_PCIE);
152 if (!pcie_pos)
153 return false;
154
155 pcie_type = pci_read_config16(dev, pcie_pos + PCI_EXP_FLAGS) & PCI_EXP_FLAGS_TYPE;
156 pcie_type >>= 4;
157
158 return (pcie_type == PCI_EXP_TYPE_ROOT_PORT);
159}
160
161static bool pcie_is_endpoint(struct device *dev)
162{
163 unsigned int pcie_pos, pcie_type;
164
165 pcie_pos = pci_find_capability(dev, PCI_CAP_ID_PCIE);
166 if (!pcie_pos)
167 return false;
168
169 pcie_type = pci_read_config16(dev, pcie_pos + PCI_EXP_FLAGS) & PCI_EXP_FLAGS_TYPE;
170 pcie_type >>= 4;
171
172 return ((pcie_type == PCI_EXP_TYPE_ENDPOINT) || (pcie_type == PCI_EXP_TYPE_LEG_END));
173}
174
175
/*
 * Re-train a PCIe link
 *
 * Returns 0 on success, -1 if the Link Training bit did not clear within
 * the polling budget (before or after triggering the retrain).
 */
#define PCIE_TRAIN_RETRY 10000
static int pciexp_retrain_link(struct device *dev, unsigned int cap)
{
	unsigned int try;
	u16 lnk;

	/*
	 * Implementation note (page 633) in PCIe Specification 3.0 suggests
	 * polling the Link Training bit in the Link Status register until the
	 * value returned is 0 before setting the Retrain Link bit to 1.
	 * This is meant to avoid a race condition when using the
	 * Retrain Link mechanism.
	 */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			break;
		udelay(100);
	}
	if (try == 0) {
		printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
		return -1;
	}

	/* Start link retraining */
	lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
	lnk |= PCI_EXP_LNKCTL_RL;
	pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);

	/* Wait for training to complete */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			return 0;
		udelay(100);
	}

	printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
	return -1;
}
219
/*
 * Return true when Common Clock Configuration is already enabled in the
 * Link Control register on both sides of the link.
 */
static bool pciexp_is_ccc_active(struct device *root, unsigned int root_cap,
				 struct device *endp, unsigned int endp_cap)
{
	u16 root_ccc, endp_ccc;

	root_ccc = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_CCC;
	endp_ccc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_CCC;
	if (root_ccc && endp_ccc) {
		printk(BIOS_INFO, "PCIe: Common Clock Configuration already enabled\n");
		return true;
	}
	return false;
}
233
/*
 * Check the Slot Clock Configuration for root port and endpoint
 * and enable Common Clock Configuration if possible. If CCC is
 * enabled the link must be retrained.
 */
static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
				       struct device *endp, unsigned int endp_cap)
{
	u16 root_scc, endp_scc, lnkctl;

	/* No need to enable common clock if it is already active. */
	if (pciexp_is_ccc_active(root, root_cap, endp, endp_cap))
		return;

	/* Get Slot Clock Configuration for root port */
	root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
	root_scc &= PCI_EXP_LNKSTA_SLC;

	/* Get Slot Clock Configuration for endpoint */
	endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
	endp_scc &= PCI_EXP_LNKSTA_SLC;

	/* Enable Common Clock Configuration and retrain */
	if (root_scc && endp_scc) {
		printk(BIOS_INFO, "Enabling Common Clock Configuration\n");

		/* Set in endpoint */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set in root port */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Retrain link if CCC was enabled */
		pciexp_retrain_link(root, root_cap);
	}
}
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700274
/*
 * Enable Clock Power Management on the endpoint if its Link Capabilities
 * register advertises support for it.
 */
static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
{
	/* check if per port clkreq is supported in device */
	u32 endp_ca;
	u16 lnkctl;
	endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
	if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
		printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
		return;
	}
	/* Set the Enable Clock Power Management bit in Link Control. */
	lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
	lnkctl = lnkctl | PCI_EXP_EN_CLK_PM;
	pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
}
Kane Chen18cb1342014-10-01 11:13:54 +0800289
Nico Huber968ef752021-03-07 01:39:18 +0100290static bool _pciexp_ltr_supported(struct device *dev, unsigned int cap)
Kenji Chen31c6e632014-10-04 01:14:44 +0800291{
Nico Huber968ef752021-03-07 01:39:18 +0100292 return pci_read_config16(dev, cap + PCI_EXP_DEVCAP2) & PCI_EXP_DEVCAP2_LTR;
Kenji Chen31c6e632014-10-04 01:14:44 +0800293}
294
Nico Huber968ef752021-03-07 01:39:18 +0100295static bool _pciexp_ltr_enabled(struct device *dev, unsigned int cap)
Aamir Bohra2188f572017-09-22 19:07:21 +0530296{
Nico Huber968ef752021-03-07 01:39:18 +0100297 return pci_read_config16(dev, cap + PCI_EXP_DEVCTL2) & PCI_EXP_DEV2_LTR;
Aamir Bohra2188f572017-09-22 19:07:21 +0530298}
299
/*
 * Enable LTR on `dev` if it supports it and (when a parent is given) the
 * parent already has LTR supported and enabled. Returns true when LTR is
 * enabled on `dev` after the call.
 */
static bool _pciexp_enable_ltr(struct device *parent, unsigned int parent_cap,
			       struct device *dev, unsigned int cap)
{
	if (!_pciexp_ltr_supported(dev, cap)) {
		printk(BIOS_DEBUG, "%s: No LTR support\n", dev_path(dev));
		return false;
	}

	if (_pciexp_ltr_enabled(dev, cap))
		return true;

	/* An upstream device must have LTR active before it is enabled
	   downstream; `parent == NULL` skips this check (root of search). */
	if (parent &&
	    (!_pciexp_ltr_supported(parent, parent_cap) ||
	     !_pciexp_ltr_enabled(parent, parent_cap)))
		return false;

	pci_or_config16(dev, cap + PCI_EXP_DEVCTL2, PCI_EXP_DEV2_LTR);
	printk(BIOS_INFO, "%s: Enabled LTR\n", dev_path(dev));
	return true;
}
320
/* Enable LTR on `dev`, consulting its upstream bridge state when needed. */
static void pciexp_enable_ltr(struct device *dev)
{
	const unsigned int cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	/*
	 * If we have get_ltr_max_latencies(), treat `dev` as the root.
	 * If not, let _pciexp_enable_ltr() query the parent's state.
	 */
	struct device *parent = NULL;
	unsigned int parent_cap = 0;
	if (!dev->ops->ops_pci || !dev->ops->ops_pci->get_ltr_max_latencies) {
		parent = dev->upstream->dev;
		if (parent->path.type != DEVICE_PATH_PCI)
			return;
		parent_cap = pci_find_capability(parent, PCI_CAP_ID_PCIE);
		if (!parent_cap)
			return;
	}

	/* Result intentionally ignored; failure just leaves LTR disabled. */
	(void)_pciexp_enable_ltr(parent, parent_cap, dev, cap);
}
344
/*
 * Look up the LTR max snoop/no-snoop latencies for `dev` by walking up the
 * PCI hierarchy to the first device whose ops provide
 * get_ltr_max_latencies(). Returns false when no such device exists on the
 * upstream path.
 */
bool pciexp_get_ltr_max_latencies(struct device *dev, u16 *max_snoop, u16 *max_nosnoop)
{
	/* Walk the hierarchy up to find get_ltr_max_latencies(). */
	do {
		if (dev->ops->ops_pci && dev->ops->ops_pci->get_ltr_max_latencies)
			break;
		/* Stop at the hierarchy root or a non-PCI parent. */
		if (dev->upstream->dev == dev || dev->upstream->dev->path.type != DEVICE_PATH_PCI)
			return false;
		dev = dev->upstream->dev;
	} while (true);

	dev->ops->ops_pci->get_ltr_max_latencies(max_snoop, max_nosnoop);
	return true;
}
359
/*
 * Enable LTR on `dev` and, when it exposes the LTR extended capability,
 * program its max snoop/no-snoop latencies from the platform values.
 */
static void pciexp_configure_ltr(struct device *parent, unsigned int parent_cap,
				 struct device *dev, unsigned int cap)
{
	if (!_pciexp_enable_ltr(parent, parent_cap, dev, cap))
		return;

	const unsigned int ltr_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID, 0);
	if (!ltr_cap)
		return;

	u16 max_snoop, max_nosnoop;
	if (!pciexp_get_ltr_max_latencies(dev, &max_snoop, &max_nosnoop))
		return;

	pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_SNOOP, max_snoop);
	pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_NOSNOOP, max_nosnoop);
	printk(BIOS_INFO, "%s: Programmed LTR max latencies\n", dev_path(dev));
}
378
/*
 * Merge one endpoint's L1 Sub-State capabilities into the running root-port
 * view packed in `*data` (bits 3:0 support mask, 15:8 common mode restore
 * time, 17:16 power-on scale, 23:19 power-on value). Keeps the intersection
 * of support bits and the larger of the timing values.
 *
 * Returns 1 when the endpoint shares at least one sub-state with `*data`
 * (and `*data` was updated), 0 otherwise.
 */
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
					    unsigned int *data)
{
	/* T_POWER_ON scale encodings: 2us, 10us, 100us; 3 is reserved. */
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	/* Endpoint's L1SS capability register (capability header + 4). */
	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	/* Keep the larger power-on time (value * scale multiplier). */
	if (power_on_value * mult[power_on_scale] <
	    endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	/* Keep the larger common mode restore time. */
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}
413
/*
 * Program L1 Sub-State control registers on the root port and every sibling
 * device of `dev`, using the common capability values negotiated by
 * pciexp_L1_substate_cal(). Aborts without touching hardware if any sibling
 * lacks a common sub-state.
 */
static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
				      unsigned int root_cap, unsigned int end_cap)
{
	struct device *dev_t;
	unsigned char L1_ss_ok;
	/* Root port's L1SS capability register seeds the negotiation. */
	unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
	unsigned int L1SubStateSupport;
	unsigned int comm_mode_rst_time;
	unsigned int power_on_scale;
	unsigned int endp_power_on_value;

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		/*
		 * rp_L1_support is init'd above from root port.
		 * it needs coordination with endpoints to reach in common.
		 * if certain endpoint doesn't support L1 Sub-State, abort
		 * this feature enabling.
		 */
		L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
						  &rp_L1_support);
		if (!L1_ss_ok)
			return;
	}

	/* Unpack the negotiated values (packing defined in
	   pciexp_L1_substate_cal()). */
	L1SubStateSupport = rp_L1_support & 0xf;
	comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
	power_on_scale = (rp_L1_support >> 16) & 0x3;
	endp_power_on_value = (rp_L1_support >> 19) & 0x1f;

	printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
	       root->path.pci.devfn >> 3);
	printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
	printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
	printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
	       endp_power_on_value, power_on_scale);

	/* L1SS Control 1 (cap + 0x08): common mode restore time. */
	pci_update_config32(root, root_cap + 0x08, ~0xff00,
			    (comm_mode_rst_time << 8));

	/* L1SS Control 2 (cap + 0x0c): T_POWER_ON value and scale. */
	pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
			    (endp_power_on_value << 3) | (power_on_scale));

	/* TODO: 0xa0, 2 are values that work on some chipsets but really
	 * should be determined dynamically by looking at downstream devices.
	 */
	pci_update_config32(root, root_cap + 0x08,
			    ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
			      ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
			    (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
			    (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

	/* Enable the negotiated sub-states on the root port. */
	pci_update_config32(root, root_cap + 0x08, ~0x1f,
			    L1SubStateSupport);

	/* Mirror the same settings into every device on the bus. */
	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
				    (endp_power_on_value << 3) | (power_on_scale));

		pci_update_config32(dev_t, end_cap + 0x08,
				    ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
				      ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
				    (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
				    (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

		pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
				    L1SubStateSupport);
	}
}
482
/*
 * Configure L1 Sub-States for the link between `root` and `dev` when both
 * expose the L1SS extended capability (or, for Intel devices, the 0xcafe
 * vendor-specific capability used as a stand-in).
 */
static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
{
	unsigned int root_cap, end_cap;

	/* Do it for function 0 only */
	if (dev->path.pci.devfn & 0x7)
		return;

	root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID, 0);
	if (!root_cap)
		return;

	end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID, 0);
	if (!end_cap) {
		/* Fall back to Intel's vendor-specific capability. */
		if (dev->vendor != PCI_VID_INTEL)
			return;

		end_cap = pciexp_find_ext_vendor_cap(dev, 0xcafe, 0);
		if (!end_cap)
			return;
	}

	pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
}
Kenji Chen31c6e632014-10-04 01:14:44 +0800507
/*
 * Determine the ASPM L0s or L1 exit latency for a link
 * by checking both root port and endpoint and returning
 * the highest latency value.
 *
 * Returns the encoded latency field value, or -1 when either side does not
 * support the requested ASPM type (or `type` is invalid).
 */
static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap,
			       enum aspm_type type)
{
	int root_lat = 0, endp_lat = 0;
	u32 root_lnkcap, endp_lnkcap;

	root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
	endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);

	/* Make sure the link supports this ASPM type by checking
	 * capability bits 11:10 with aspm_type offset by 1 */
	if (!(root_lnkcap & (1 << (type + 9))) ||
	    !(endp_lnkcap & (1 << (type + 9))))
		return -1;

	/* Find the one with higher latency */
	switch (type) {
	case PCIE_ASPM_L0S:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		break;
	case PCIE_ASPM_L1:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		break;
	default:
		return -1;
	}

	return (endp_lat > root_lat) ? endp_lat : root_lat;
}
545
/*
 * Enable ASPM on PCIe root port and endpoint.
 *
 * Each ASPM state is enabled only when the link's exit latency fits within
 * the endpoint's acceptable latency from its Device Capabilities register.
 */
static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap)
{
	/* Indexed by the PCIE_ASPM_L0S|PCIE_ASPM_L1 bit combination. */
	const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
	enum aspm_type apmc = PCIE_ASPM_NONE;
	int exit_latency, ok_latency;
	u16 lnkctl;
	u32 devcap;

	if (endp->disable_pcie_aspm)
		return;

	/* Get endpoint device capabilities for acceptable limits */
	devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);

	/* Enable L0s if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L0S);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L0S;

	/* Enable L1 if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L1);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L1;

	if (apmc != PCIE_ASPM_NONE) {
		/* Set APMC in root port first */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set APMC in endpoint device next */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
	}

	printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
}
Duncan Laurie90dcdd42011-10-25 14:15:11 -0700592
/*
 * Program the Max_Payload_Size encoding (0..5, i.e. 128..4096 bytes) into
 * the device's Device Control register. No-op if `dev` has no PCIe
 * capability.
 */
static void pciexp_dev_set_max_payload_size(struct device *dev, unsigned int max_payload)
{
	u16 devctl;
	unsigned int pcie_cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);

	if (!pcie_cap)
		return;

	devctl = pci_read_config16(dev, pcie_cap + PCI_EXP_DEVCTL);
	devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
	/*
	 * Should never overflow to higher bits, due to how max_payload is
	 * guarded in this file.
	 */
	devctl |= max_payload << 5;
	pci_write_config16(dev, pcie_cap + PCI_EXP_DEVCTL, devctl);
}
Kyösti Mälkki94ce79d2019-12-16 17:21:13 +0200610
Michał Żygowski9f0443c2024-01-31 13:09:37 +0100611static unsigned int pciexp_dev_get_current_max_payload_size(struct device *dev)
612{
613 u16 devctl;
614 unsigned int pcie_cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
615
616 if (!pcie_cap)
617 return 0;
618
619 devctl = pci_read_config16(dev, pcie_cap + PCI_EXP_DEVCTL);
620 devctl &= PCI_EXP_DEVCTL_PAYLOAD;
621 return (devctl >> 5);
622}
623
624static unsigned int pciexp_dev_get_max_payload_size_cap(struct device *dev)
625{
626 u16 devcap;
627 unsigned int pcie_cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
628
629 if (!pcie_cap)
630 return 0;
631
632 devcap = pci_read_config16(dev, pcie_cap + PCI_EXP_DEVCAP);
633 return (devcap & PCI_EXP_DEVCAP_PAYLOAD);
634}
635
/*
 * Set max payload size of a parent based on max payload size capability of the child.
 */
static void pciexp_configure_max_payload_size(struct device *parent, struct device *child)
{
	unsigned int child_max_payload, parent_max_payload, max_payload;

	/* Get max payload size supported by child */
	child_max_payload = pciexp_dev_get_current_max_payload_size(child);
	/* Get max payload size configured by parent */
	parent_max_payload = pciexp_dev_get_current_max_payload_size(parent);
	/* Set max payload to smaller of the reported device capability or parent config. */
	max_payload = MIN(child_max_payload, parent_max_payload);

	if (max_payload > 5) {
		/* Values 6 and 7 are reserved in PCIe 3.0 specs. */
		printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
		       max_payload);
		max_payload = 5;
	}

	/* Only reprogram the parent when the value actually shrinks. */
	if (max_payload != parent_max_payload) {
		pciexp_dev_set_max_payload_size(parent, max_payload);
		printk(BIOS_INFO, "%s: Max_Payload_Size adjusted to %d\n", dev_path(parent),
		       (1 << (max_payload + 7)));
	}
}
663
/*
 * Clear Lane Error State at the end of PCIe link training.
 * Lane error status is cleared if PCIEXP_LANE_ERR_STAT_CLEAR is set.
 * Lane error is normal during link training, so we need to clear it.
 * At this moment, link has been used, but for a very short duration.
 */
static void clear_lane_error_status(struct device *dev)
{
	u32 reg32;
	u16 pos;

	pos = pciexp_find_extended_cap(dev, PCI_EXP_SEC_CAP_ID, 0);
	if (pos == 0)
		return;

	reg32 = pci_read_config32(dev, pos + PCI_EXP_SEC_LANE_ERR_STATUS);
	if (reg32 == 0)
		return;

	printk(BIOS_DEBUG, "%s: Clear Lane Error Status.\n", dev_path(dev));
	printk(BIOS_DEBUG, "LaneErrStat:0x%x\n", reg32);
	/* Write-1-to-clear: write back the bits that are set. */
	pci_write_config32(dev, pos + PCI_EXP_SEC_LANE_ERR_STATUS, reg32);
}
687
/*
 * Apply all Kconfig-selected PCIe link optimizations (common clock, clock
 * PM, L1 sub-states, ASPM, lane error clearing, payload size, LTR) to the
 * link between `dev` and its upstream root/bridge.
 */
static void pciexp_tune_dev(struct device *dev)
{
	struct device *root = dev->upstream->dev;
	unsigned int root_cap, cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
	if (!root_cap)
		return;

	/* Check for and enable Common Clock */
	if (CONFIG(PCIEXP_COMMON_CLOCK))
		pciexp_enable_common_clock(root, root_cap, dev, cap);

	/* Check if per port CLK req is supported by endpoint */
	if (CONFIG(PCIEXP_CLK_PM))
		pciexp_enable_clock_power_pm(dev, cap);

	/* Enable L1 Sub-State when both root port and endpoint support */
	if (CONFIG(PCIEXP_L1_SUB_STATE))
		pciexp_config_L1_sub_state(root, dev);

	/* Check for and enable ASPM */
	if (CONFIG(PCIEXP_ASPM))
		pciexp_enable_aspm(root, root_cap, dev, cap);

	/* Clear PCIe Lane Error Status */
	if (CONFIG(PCIEXP_LANE_ERR_STAT_CLEAR))
		clear_lane_error_status(root);

	/* Set the Max Payload Size to the maximum supported capability for this device */
	if (pcie_is_endpoint(dev))
		pciexp_dev_set_max_payload_size(dev, pciexp_dev_get_max_payload_size_cap(dev));

	/* Limit the parent's Max Payload Size if needed */
	pciexp_configure_max_payload_size(root, dev);

	pciexp_configure_ltr(root, root_cap, dev, cap);
}
730
Michał Żygowski9f0443c2024-01-31 13:09:37 +0100731static void pciexp_sync_max_payload_size(struct bus *bus, unsigned int max_payload)
732{
733 struct device *child;
734
735 /* Set the max payload for children on the bus and their children, etc. */
736 for (child = bus->children; child; child = child->sibling) {
737 if (!is_pci(child))
738 continue;
739
740 pciexp_dev_set_max_payload_size(child, max_payload);
741
742 if (child->downstream)
743 pciexp_sync_max_payload_size(child->downstream, max_payload);
744 }
745}
746
/*
 * Scan a PCIe bus below a bridge: enable LTR on the bridge, pre-program its
 * Max_Payload_Size capability, enumerate children, tune each PCI child in the
 * requested devfn window, and finally (for root ports) propagate the resulting
 * Max_Payload_Size to the entire subtree.
 */
void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
		     unsigned int max_devfn)
{
	struct device *child;
	unsigned int max_payload;

	/* Bridge must have LTR enabled before any downstream device (done first) */
	pciexp_enable_ltr(bus->dev);

	/*
	 * Set the Max Payload Size to the maximum supported capability for this bridge.
	 * This value will be used in pciexp_tune_dev to limit the Max Payload size if needed.
	 */
	max_payload = pciexp_dev_get_max_payload_size_cap(bus->dev);
	pciexp_dev_set_max_payload_size(bus->dev, max_payload);

	pci_scan_bus(bus, min_devfn, max_devfn);

	/* Tune only PCI children whose devfn lies inside the scanned window */
	for (child = bus->children; child; child = child->sibling) {
		if (child->path.type != DEVICE_PATH_PCI)
			continue;
		if ((child->path.pci.devfn < min_devfn) ||
		    (child->path.pci.devfn > max_devfn)) {
			continue;
		}
		pciexp_tune_dev(child);
	}

	/*
	 * Now the root port's Max Payload Size should be set to the highest
	 * possible value supported by all devices under a given root port.
	 * Propagate that value down from root port to all devices, so the Max
	 * Payload Size is equal on all devices, as some devices may have
	 * different capabilities and the programmed value depends on the
	 * order of device population in the subtree.
	 */
	if (pcie_is_root_port(bus->dev)) {
		max_payload = pciexp_dev_get_current_max_payload_size(bus->dev);

		printk(BIOS_INFO, "%s: Setting Max_Payload_Size to %d for devices under this"
		       " root port\n", dev_path(bus->dev), 1 << (max_payload + 7));

		pciexp_sync_max_payload_size(bus, max_payload);
	}
}
791
/* Scan a PCIe bridge, using the PCIe-aware bus scan routine above. */
void pciexp_scan_bridge(struct device *dev)
{
	do_pci_scan_bridge(dev, pciexp_scan_bus);
}
796
797/** Default device operations for PCI Express bridges */
798static struct pci_operations pciexp_bus_ops_pci = {
799 .set_subsystem = 0,
800};
801
/* Default device_operations for a (non-hotplug) PCIe bridge. */
struct device_operations default_pciexp_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600810
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600811static void pciexp_hotplug_dummy_read_resources(struct device *dev)
812{
813 struct resource *resource;
814
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700815 /* Add extra memory space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600816 resource = new_resource(dev, 0x10);
817 resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
818 resource->align = 12;
819 resource->gran = 12;
820 resource->limit = 0xffffffff;
821 resource->flags |= IORESOURCE_MEM;
822
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700823 /* Add extra prefetchable memory space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600824 resource = new_resource(dev, 0x14);
825 resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
826 resource->align = 12;
827 resource->gran = 12;
828 resource->limit = 0xffffffffffffffff;
829 resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
830
Furquan Shaikh32f385e2020-05-15 23:35:00 -0700831 /* Set resource flag requesting allocation above 4G boundary. */
832 if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
833 resource->flags |= IORESOURCE_ABOVE_4G;
834
835 /* Add extra I/O space */
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600836 resource = new_resource(dev, 0x18);
837 resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
838 resource->align = 12;
839 resource->gran = 12;
840 resource->limit = 0xffff;
841 resource->flags |= IORESOURCE_IO;
842}
843
/* Ops for the dummy hotplug device: it only reserves resource windows. */
static struct device_operations pciexp_hotplug_dummy_ops = {
	.read_resources = pciexp_hotplug_dummy_read_resources,
	.set_resources = noop_set_resources,
};
848
849void pciexp_hotplug_scan_bridge(struct device *dev)
850{
Nico Huber577c6b92022-08-15 00:08:58 +0200851 dev->hotplug_port = 1;
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600852 dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;
853
854 /* Normal PCIe Scan */
855 pciexp_scan_bridge(dev);
856
857 /* Add dummy slot to preserve resources, must happen after bus scan */
858 struct device *dummy;
859 struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
Arthur Heymans7fcd4d52023-08-24 15:12:19 +0200860 dummy = alloc_dev(dev->downstream, &dummy_path);
Jeremy Sollercf2ac542019-10-09 21:40:36 -0600861 dummy->ops = &pciexp_hotplug_dummy_ops;
862}
863
/* Default device_operations for a hotplug-capable PCIe bridge. */
struct device_operations default_pciexp_hotplug_ops_bus = {
	.read_resources = pci_bus_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus = pciexp_hotplug_scan_bridge,
	.reset_bus = pci_bus_reset,
	.ops_pci = &pciexp_bus_ops_pci,
};