#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/device.h>
#include <asm/setup.h>
30 "error",
"D0",
"D1",
"D2",
"D3hot",
"D3cold",
"unknown",
#define PME_TIMEOUT 1000
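/* Sleep for the D3hot->D0 transition delay: at least the 10 ms the PCI PM spec requires, longer if a quirk has raised dev->d3_delay. */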
static void pci_dev_d3_sleep(struct pci_dev *dev)
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)

static bool pcie_ari_disabled;
unsigned char max, n;
#ifdef CONFIG_HAS_IOMEM

#define PCI_FIND_CAP_TTL 48
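/* __pci_find_next_cap_ttl(): follow the capability list through its 'next' pointers, decrementing a time-to-live counter so a corrupted (looping) list still terminates. */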
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
	pci_bus_read_config_byte(bus, devfn, pos, &pos);
static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
return __pci_find_next_cap(dev->bus, dev->devfn,
static int __pci_bus_find_cap_start(struct pci_bus *bus,
	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
pos = __pci_find_next_cap(bus, devfn, pos, cap);
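/* HyperTransport capability search: the capability type is read from the byte at pos + 3, masked and compared against ht_cap. */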
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
rc = pci_read_config_byte(dev, pos + 3, &cap);
if ((cap & mask) == ht_cap)
pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
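/* Parent-resource lookup: a bus resource only qualifies if it fully contains [res->start, res->end]. */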
pci_bus_for_each_resource(bus, r, i) {
	if (res->start && !(res->start >= r->start && res->end <= r->end))
pci_restore_bars(struct pci_dev *dev)
pci_platform_pm = ops;
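/* The platform_pci_* wrappers forward to the registered platform PM backend (ACPI on most systems) and fall back to harmless defaults when no backend is registered. */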
static inline bool platform_pci_power_manageable(struct pci_dev *dev)
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;

static inline int platform_pci_set_power_state(struct pci_dev *dev,
	return pci_platform_pm ?

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
	return pci_platform_pm ?

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
	return pci_platform_pm ?
bool need_restore = false;
dev_err(&dev->dev, "invalid power transition "
pci_dev_d3_sleep(dev);
dev_info(&dev->dev, "Refused to change power state, "
pci_restore_bars(dev);
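/* Returning to D0 goes through the platform first so that firmware can apply power before the native PM registers are touched. */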
if (platform_pci_power_manageable(dev))
	platform_pci_set_power_state(dev, PCI_D0);
pci_raw_set_power_state(dev, PCI_D0);
if (platform_pci_power_manageable(dev)) {
	error = platform_pci_set_power_state(dev, state);
pci_platform_power_transition(dev, PCI_D0);
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
ret = pci_platform_power_transition(dev, state);
else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
__pci_start_power_transition(dev, state);
error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
if (!error && dev->bus->self)
ret = platform_pci_choose_state(dev);
switch (state.event) {
dev_info(&dev->dev, "unrecognized suspend event %d\n",
#define PCI_EXP_SAVE_REGS 7
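/* Number of 16-bit PCI Express control registers captured by pci_save_pcie_state() and written back by pci_restore_pcie_state(). */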
if (tmp->cap.cap_nr == cap)

static int pci_save_pcie_state(struct pci_dev *dev)
if (!pci_is_pcie(dev))
dev_err(&dev->dev, "buffer not found in %s\n", __func__);
cap = (u16 *)&save_state->cap.data[0];

static void pci_restore_pcie_state(struct pci_dev *dev)
cap = (u16 *)&save_state->cap.data[0];

static int pci_save_pcix_state(struct pci_dev *dev)
dev_err(&dev->dev, "buffer not found in %s\n", __func__);
pci_read_config_word(dev, pos + PCI_X_CMD,
		     (u16 *)save_state->cap.data);

static void pci_restore_pcix_state(struct pci_dev *dev)
if (!save_state || pos <= 0)
cap = (u16 *)&save_state->cap.data[0];
pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
for (i = 0; i < 16; i++)
if ((i = pci_save_pcie_state(dev)) != 0)
if ((i = pci_save_pcix_state(dev)) != 0)
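/* Restore a single config dword only when the device no longer holds the saved value, logging what gets written back. */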
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
pci_read_config_dword(pdev, offset, &val);
if (val == saved_val)
dev_dbg(&pdev->dev, "restoring config space at offset "
	"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
pci_write_config_dword(pdev, offset, saved_val);
pci_read_config_dword(pdev, offset, &val);
if (val == saved_val)
static void pci_restore_config_space_range(struct pci_dev *pdev,
for (index = end; index >= start; index--)
	pci_restore_config_dword(pdev, 4 * index,

static void pci_restore_config_space(struct pci_dev *pdev)
pci_restore_config_space_range(pdev, 10, 15, 0);
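/* Restore BARs (dwords 4-9) before the command register (dwords 0-3). */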
pci_restore_config_space_range(pdev, 4, 9, 10);
pci_restore_config_space_range(pdev, 0, 3, 0);
pci_restore_config_space_range(pdev, 0, 15, 0);
pci_restore_pcie_state(dev);
pci_restore_config_space(dev);
pci_restore_pcix_state(dev);
sizeof(state->config_space));
memcpy(cap, &tmp->cap, len);
tmp = pci_find_saved_cap(dev, cap->cap_nr);
if (!tmp || tmp->cap.size != cap->size)
static int do_pci_enable_device(struct pci_dev *dev, int bars)
if (err < 0 && err != -EIO)
if (pci_is_enabled(dev))

static int __pci_enable_device_flags(struct pci_dev *dev,
if (dev->resource[i].flags & flags)
if (dev->resource[i].flags & flags)
err = do_pci_enable_device(dev, bars);
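/* pci_enable_device_mem() enables only the memory BARs; pci_enable_device() asks for both I/O and memory resources. */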
return __pci_enable_device_flags(dev, IORESOURCE_MEM);
return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
static void pcim_release(struct device *gendev, void *res)
if (pci_is_managed(pdev))
dr = get_pci_dr(pdev);
dr = find_pci_dr(pdev);

static void do_pci_disable_device(struct pci_dev *dev)
pci_read_config_word(dev, PCI_COMMAND, &pci_command);
pci_command &= ~PCI_COMMAND_MASTER;
pci_write_config_word(dev, PCI_COMMAND, pci_command);
if (pci_is_enabled(dev))
	do_pci_disable_device(dev);
dr = find_pci_dr(dev);
do_pci_disable_device(dev);
pci_read_config_word(dev, pmcsr_pos, &pmcsr);
pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(dev, pmcsr_pos, pmcsr);
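/* Wake handling for a device that signalled PME#: optionally clear its polling flag, report a wakeup event, then request a runtime resume. */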
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
if (pme_poll_reset && dev->pme_poll)
pci_wakeup_event(dev);
pm_request_resume(&dev->dev);
pci_wakeup_event(pci_dev);
pm_request_resume(&pci_dev->dev);
if (!list_empty(&pci_pme_list)) {
	if (pme_dev->dev->pme_poll) {
bridge = pme_dev->dev->bus->self;
pci_pme_wakeup(pme_dev->dev, NULL);
if (!list_empty(&pci_pme_list))
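/* Devices whose PME# signalling is unreliable (dev->pme_poll) are kept on pci_pme_list and polled every PME_TIMEOUT ms by the scan work above. */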
list_add(&pme_dev->list, &pci_pme_list);
if (list_is_singular(&pci_pme_list))
if (pme_dev->dev == dev) {
dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
bool runtime, bool enable)
if (enable && !runtime && !device_may_wakeup(&dev->dev))
error = runtime ? platform_pci_run_wake(dev, true) :
		  platform_pci_sleep_wake(dev, true);
platform_pci_run_wake(dev, false);
platform_pci_sleep_wake(dev, false);
pci_enable_wake(dev, PCI_D3hot, enable);
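/* Target-state selection for suspend: prefer what the platform reports, refuse D1/D2 when the device cannot enter them, and pick a state from which PME# can still be generated if the device may wake the system. */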
if (platform_pci_power_manageable(dev)) {
pci_power_t state = platform_pci_choose_state(dev);
if (pci_no_d1d2(dev))
target_state = state;
} else if (!dev->pm_cap) {
} else if (device_may_wakeup(&dev->dev)) {
return target_state;
if (target_state > PCI_D3hot)
pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
pci_enable_wake(dev, target_state, false);
pci_enable_wake(dev, PCI_D0, false);
if (device_run_wake(&dev->dev))
if (device_run_wake(&bridge->dev))
return device_run_wake(bus->bridge);
pm_runtime_get_sync(parent);
pm_runtime_get_noresume(dev);
pm_runtime_resume(dev);
pm_runtime_put(dev);
pm_runtime_put_sync(parent);
device_enable_async_suspend(&dev->dev);
pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
	pmc & PCI_PM_CAP_VER_MASK);
if (!pci_no_d1d2(dev)) {
"PME# supported from%s%s%s%s%s\n",
if (!platform_pci_can_wakeup(dev))
platform_pci_sleep_wake(dev, false);

static int pci_add_cap_save_buffer(
save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
save_state->cap.cap_nr = cap;
pci_add_saved_cap(dev, save_state);
"unable to preallocate PCI Express save buffer\n");
"unable to preallocate PCI-X save buffer\n");
if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
bridge = dev->bus->self;
if (type & PCI_EXP_IDO_REQUEST)
if (type & PCI_EXP_IDO_COMPLETION)
if (type & PCI_EXP_IDO_REQUEST)
if (type & PCI_EXP_IDO_COMPLETION)
if (dev->bus->self) {
case PCI_EXP_OBFF_SIGNAL_L0:
case PCI_EXP_OBFF_SIGNAL_ALWAYS:
ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
WARN(1, "bad OBFF signal type\n");
static bool pci_ltr_supported(struct pci_dev *dev)
if (!pci_ltr_supported(dev))
if (dev->bus->self) {
if (!pci_ltr_supported(dev))
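/* Convert a latency value into the LTR (value, scale) encoding: divide by 32 until it fits the 10-bit value field, returning the number of scale steps. */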
static int __pci_ltr_scale(int *val)
while (*val > 1023) {
	*val = (*val + 31) / 32;

int pos, ret, snoop_scale, nosnoop_scale;
if (!pci_ltr_supported(dev))
snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
static int pci_acs_enable;
if (!pci_acs_enable)
pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
if (!pci_is_pcie(pdev))
if ((ctrl & acs_flags) != acs_flags)
if (pci_is_root_bus(pdev->bus))
	return (end == NULL);
parent = pdev->bus->self;
} while (pdev != end);
if (pci_ari_enabled(dev->bus))
return (((pin - 1) + slot) % 4) + 1;
while (!pci_is_root_bus(dev->bus)) {
	dev = dev->bus->self;
while (!pci_is_root_bus(dev->bus)) {
	dev = dev->bus->self;
dr = find_pci_dr(pdev);
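/* Common helper behind pci_request_region() and its exclusive variant: reserve one BAR and record it in the devres region mask so managed callers get an automatic release. */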
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
dr = find_pci_dr(pdev);
dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
return __pci_request_region(pdev, bar, res_name, 0);
for (i = 0; i < 6; i++)
	if (bars & (1 << i))
const char *res_name, int excl)
for (i = 0; i < 6; i++)
	if (bars & (1 << i))
		if (__pci_request_region(pdev, i, res_name, excl))
if (bars & (1 << i))
const char *res_name)
int bars, const char *res_name)
((1 << 6) - 1), res_name);
static void __pci_set_master(struct pci_dev *dev, bool enable)
if (cmd != old_cmd) {
	enable ? "enabling" : "disabling");
if (pci_is_pcie(dev))
dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
__pci_set_master(dev, true);
__pci_set_master(dev, false);
if (cacheline_size == pci_cache_line_size)
dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
	   "supported\n", pci_cache_line_size << 2);
#ifdef PCI_DISABLE_MWI
dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
cmd &= ~PCI_COMMAND_INVALIDATE;

u16 pci_command, new;
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
if (new != pci_command) {
dr = find_pci_dr(pdev);

bool mask_supported = false;
if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
	dev_err(&dev->dev, "Command register changed from "
		"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
	mask_supported = true;
return mask_supported;
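/* Read PCI_COMMAND and PCI_STATUS as a single dword, flip INTx disable only when the pending-interrupt status matches the requested operation, and write the command back only if it changed. */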
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
bool mask_updated = true;
u32 cmd_status_dword;
u16 origcmd, newcmd;
unsigned long flags;
if (mask != irq_pending) {
	mask_updated = false;
origcmd = cmd_status_dword;
if (newcmd != origcmd)
return mask_updated;

return pci_check_and_set_intx_mask(dev, true);
return pci_check_and_set_intx_mask(dev, false);
return dma_set_max_seg_size(&dev->dev, size);
return dma_set_seg_boundary(&dev->dev, mask);
static int pcie_flr(struct pci_dev *dev, int probe)
for (i = 0; i < 4; i++) {
	msleep((1 << (i - 1)) * 100);
dev_err(&dev->dev, "transaction is not cleared; "
	"proceeding with reset anyway\n");

static int pci_af_flr(struct pci_dev *dev, int probe)
pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
for (i = 0; i < 4; i++) {
	msleep((1 << (i - 1)) * 100);
dev_err(&dev->dev, "transaction is not cleared; "
	"proceeding with reset anyway\n");
static int pci_pm_reset(struct pci_dev *dev, int probe)
csr &= ~PCI_PM_CTRL_STATE_MASK;
pci_dev_d3_sleep(dev);
csr &= ~PCI_PM_CTRL_STATE_MASK;
pci_dev_d3_sleep(dev);

static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
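/* Reset methods are tried in order until one succeeds: a device-specific quirk, PCIe FLR, AF FLR, a PM D3hot/D0 cycle, then a secondary-bus reset through the parent bridge. */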
rc = pcie_flr(dev, probe);
rc = pci_af_flr(dev, probe);
rc = pci_pm_reset(dev, probe);
rc = pci_parent_bus_reset(dev, probe);

static int pci_dev_reset(struct pci_dev *dev, int probe)
device_lock(&dev->dev);
rc = __pci_dev_reset(dev, probe);
device_unlock(&dev->dev);
return pci_dev_reset(dev, 0);
return __pci_dev_reset(dev, 0);
return pci_dev_reset(dev, 1);
rc = pci_dev_reset(dev, 1);
rc = pci_dev_reset(dev, 0);
if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
v = ffs(mmrbc) - 10;
if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
v = (ffs(rq) - 8) << 12;
} else if (resno < PCI_BRIDGE_RESOURCES) {
dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);

static arch_set_vga_state_t arch_set_vga_state;
arch_set_vga_state = func;

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
if (arch_set_vga_state)
	return arch_set_vga_state(dev, decode, command_bits,
unsigned int command_bits, u32 flags)
rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
	cmd |= command_bits;
	cmd &= ~command_bits;
if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,

#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
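/* Parse the "pci=resource_alignment=" option: each entry is an optional alignment order followed by '@' and a [<domain>:]<bus>:<slot>.<func> address; entries are separated by ';' or ','. */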
spin_lock(&resource_alignment_lock);
p = resource_alignment_param;
if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
if (sscanf(p, "%x:%x:%x.%x%n",
	   &seg, &bus, &slot, &func, &count) != 4) {
if (sscanf(p, "%x:%x.%x%n",
	   &bus, &slot, &func, &count) != 3) {
printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
bus == dev->bus->number &&
if (align_order == -1) {
align = 1 << align_order;
if (*p != ';' && *p != ',') {
spin_unlock(&resource_alignment_lock);
3776 "Can't reassign resources to host bridge.\n");
3781 "Disabling memory decoding and releasing memory resources.\n");
3789 if (!(r->
flags & IORESOURCE_MEM))
3791 size = resource_size(r);
3795 "Rounding up size of resource #%d to %#llx.\n",
3796 i, (
unsigned long long)size);
3809 if (!(r->
flags & IORESOURCE_MEM))
3811 r->
end = resource_size(r) - 1;
3822 spin_lock(&resource_alignment_lock);
3823 strncpy(resource_alignment_param, buf, count);
3824 resource_alignment_param[
count] =
'\0';
3825 spin_unlock(&resource_alignment_lock);
3832 spin_lock(&resource_alignment_lock);
3833 count =
snprintf(buf, size,
"%s", resource_alignment_param);
3834 spin_unlock(&resource_alignment_lock);
3850 pci_resource_alignment_store);
static int __init pci_resource_alignment_sysfs_init(void)
&bus_attr_resource_alignment);

static void __devinit pci_no_domains(void)
#ifdef CONFIG_PCI_DOMAINS
pci_domains_supported = 0;
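/* Handler for the "pci=" kernel command-line option: each recognised keyword below either flips a global flag or hands its argument to the relevant parser. */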
if (!strcmp(str, "nomsi")) {
} else if (!strcmp(str, "noaer")) {
} else if (!strncmp(str, "realloc=", 8)) {
} else if (!strncmp(str, "realloc", 7)) {
} else if (!strcmp(str, "nodomains")) {
} else if (!strncmp(str, "noari", 5)) {
	pcie_ari_disabled = true;
} else if (!strncmp(str, "cbiosize=", 9)) {
} else if (!strncmp(str, "cbmemsize=", 10)) {
} else if (!strncmp(str, "resource_alignment=", 19)) {
} else if (!strncmp(str, "ecrc=", 5)) {
} else if (!strncmp(str, "hpiosize=", 9)) {
} else if (!strncmp(str, "hpmemsize=", 10)) {
} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
} else if (!strncmp(str, "pcie_bus_safe", 13)) {
} else if (!strncmp(str, "pcie_bus_perf", 13)) {
} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
} else if (!strncmp(str, "pcie_scan_all", 13)) {
	pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);