Linux Kernel 3.7.1
pci.c
1 /*
2  * PCI Bus Services, see include/linux/pci.h for further explanation.
3  *
4  * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5  * David Mosberger-Tang
6  *
7  * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/delay.h>
12 #include <linux/init.h>
13 #include <linux/pci.h>
14 #include <linux/pm.h>
15 #include <linux/slab.h>
16 #include <linux/module.h>
17 #include <linux/spinlock.h>
18 #include <linux/string.h>
19 #include <linux/log2.h>
20 #include <linux/pci-aspm.h>
21 #include <linux/pm_wakeup.h>
22 #include <linux/interrupt.h>
23 #include <linux/device.h>
24 #include <linux/pm_runtime.h>
25 #include <asm-generic/pci-bridge.h>
26 #include <asm/setup.h>
27 #include "pci.h"
28 
29 const char *pci_power_names[] = {
30  "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
31 };
33 
36 
39 
40 unsigned int pci_pm_d3_delay;
41 
42 static void pci_pme_list_scan(struct work_struct *work);
43 
44 static LIST_HEAD(pci_pme_list);
45 static DEFINE_MUTEX(pci_pme_list_mutex);
46 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
47 
48 struct pci_pme_device {
49  struct list_head list;
50  struct pci_dev *dev;
51 };
52 
53 #define PME_TIMEOUT 1000 /* How long between PME checks */
54 
55 static void pci_dev_d3_sleep(struct pci_dev *dev)
56 {
57  unsigned int delay = dev->d3_delay;
58 
59  if (delay < pci_pm_d3_delay)
60  delay = pci_pm_d3_delay;
61 
62  msleep(delay);
63 }
64 
65 #ifdef CONFIG_PCI_DOMAINS
66 int pci_domains_supported = 1;
67 #endif
68 
69 #define DEFAULT_CARDBUS_IO_SIZE (256)
70 #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
71 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
72 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
73 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
74 
75 #define DEFAULT_HOTPLUG_IO_SIZE (256)
76 #define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
77 /* pci=hpmemsize=nnM,hpiosize=nn can override this */
78 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
79 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
80 
81 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
82 
83 /*
84  * The default CLS is used if arch didn't set CLS explicitly and not
85  * all pci devices agree on the same value. Arch can override either
86  * the dfl or actual value as it sees fit. Don't forget this is
87  * measured in 32-bit words, not bytes.
88  */
89 u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
90 u8 pci_cache_line_size;
91 
92 /*
93  * If we set up a device for bus mastering, we need to check the latency
94  * timer as certain BIOSes forget to set it properly.
95  */
96 unsigned int pcibios_max_latency = 255;
97 
98 /* If set, the PCIe ARI capability will not be used. */
99 static bool pcie_ari_disabled;
100 
108 unsigned char pci_bus_max_busnr(struct pci_bus* bus)
109 {
110  struct list_head *tmp;
111  unsigned char max, n;
112 
113  max = bus->busn_res.end;
114  list_for_each(tmp, &bus->children) {
115  n = pci_bus_max_busnr(pci_bus_b(tmp));
116  if(n > max)
117  max = n;
118  }
119  return max;
120 }
122 
123 #ifdef CONFIG_HAS_IOMEM
124 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
125 {
126  /*
127  * Make sure the BAR is actually a memory resource, not an IO resource
128  */
129  if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
130  WARN_ON(1);
131  return NULL;
132  }
133  return ioremap_nocache(pci_resource_start(pdev, bar),
134  pci_resource_len(pdev, bar));
135 }
137 #endif
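
As a usage sketch (the function name foo_map_bar0() is hypothetical, not part of pci.c): once a device has been enabled, a driver can map one of its memory BARs with pci_ioremap_bar() and fail gracefully, since the helper returns NULL for anything that is not a memory resource:

static void __iomem *foo_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	/* BAR 0 is assumed to be a memory BAR; pci_ioremap_bar() returns NULL otherwise */
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs)
		dev_err(&pdev->dev, "failed to map BAR 0\n");
	return regs;
}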
138 
139 #define PCI_FIND_CAP_TTL 48
140 
141 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
142  u8 pos, int cap, int *ttl)
143 {
144  u8 id;
145 
146  while ((*ttl)--) {
147  pci_bus_read_config_byte(bus, devfn, pos, &pos);
148  if (pos < 0x40)
149  break;
150  pos &= ~3;
151  pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
152  &id);
153  if (id == 0xff)
154  break;
155  if (id == cap)
156  return pos;
157  pos += PCI_CAP_LIST_NEXT;
158  }
159  return 0;
160 }
161 
162 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
163  u8 pos, int cap)
164 {
165  int ttl = PCI_FIND_CAP_TTL;
166 
167  return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
168 }
169 
170 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
171 {
172  return __pci_find_next_cap(dev->bus, dev->devfn,
173  pos + PCI_CAP_LIST_NEXT, cap);
174 }
176 
177 static int __pci_bus_find_cap_start(struct pci_bus *bus,
178  unsigned int devfn, u8 hdr_type)
179 {
180  u16 status;
181 
182  pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
183  if (!(status & PCI_STATUS_CAP_LIST))
184  return 0;
185 
186  switch (hdr_type) {
187  case PCI_HEADER_TYPE_NORMAL:
188  case PCI_HEADER_TYPE_BRIDGE:
189  return PCI_CAPABILITY_LIST;
190  case PCI_HEADER_TYPE_CARDBUS:
191  return PCI_CB_CAPABILITY_LIST;
192  default:
193  return 0;
194  }
195 
196  return 0;
197 }
198 
218 int pci_find_capability(struct pci_dev *dev, int cap)
219 {
220  int pos;
221 
222  pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
223  if (pos)
224  pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
225 
226  return pos;
227 }
228 
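For illustration (a minimal sketch, not taken from this file): a caller typically uses pci_find_capability() to get the offset of a capability and then accesses registers relative to that offset, e.g. the Power Management Capabilities register:

	int pos;
	u16 pmc;

	pos = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (pos)
		/* PMC lives at a fixed offset inside the PM capability */
		pci_read_config_word(dev, pos + PCI_PM_PMC, &pmc);
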
242 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
243 {
244  int pos;
245  u8 hdr_type;
246 
247  pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
248 
249  pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
250  if (pos)
251  pos = __pci_find_next_cap(bus, devfn, pos, cap);
252 
253  return pos;
254 }
255 
267 int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
268 {
269  u32 header;
270  int ttl;
271  int pos = PCI_CFG_SPACE_SIZE;
272 
273  /* minimum 8 bytes per capability */
274  ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
275 
276  if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
277  return 0;
278 
279  if (start)
280  pos = start;
281 
282  if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
283  return 0;
284 
285  /*
286  * If we have no capabilities, this is indicated by cap ID,
287  * cap version and next pointer all being 0.
288  */
289  if (header == 0)
290  return 0;
291 
292  while (ttl-- > 0) {
293  if (PCI_EXT_CAP_ID(header) == cap && pos != start)
294  return pos;
295 
296  pos = PCI_EXT_CAP_NEXT(header);
297  if (pos < PCI_CFG_SPACE_SIZE)
298  break;
299 
300  if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
301  break;
302  }
303 
304  return 0;
305 }
307 
322 int pci_find_ext_capability(struct pci_dev *dev, int cap)
323 {
324  return pci_find_next_ext_capability(dev, 0, cap);
325 }
327 
328 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
329 {
330  int rc, ttl = PCI_FIND_CAP_TTL;
331  u8 cap, mask;
332 
333  if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
334  mask = HT_3BIT_CAP_MASK;
335  else
336  mask = HT_5BIT_CAP_MASK;
337 
338  pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
339  PCI_CAP_ID_HT, &ttl);
340  while (pos) {
341  rc = pci_read_config_byte(dev, pos + 3, &cap);
342  if (rc != PCIBIOS_SUCCESSFUL)
343  return 0;
344 
345  if ((cap & mask) == ht_cap)
346  return pos;
347 
348  pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
349  pos + PCI_CAP_LIST_NEXT,
350  PCI_CAP_ID_HT, &ttl);
351  }
352 
353  return 0;
354 }
368 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
369 {
370  return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
371 }
373 
385 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
386 {
387  int pos;
388 
389  pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
390  if (pos)
391  pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
392 
393  return pos;
394 }
396 
406 struct resource *
407 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
408 {
409  const struct pci_bus *bus = dev->bus;
410  int i;
411  struct resource *best = NULL, *r;
412 
413  pci_bus_for_each_resource(bus, r, i) {
414  if (!r)
415  continue;
416  if (res->start && !(res->start >= r->start && res->end <= r->end))
417  continue; /* Not contained */
418  if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
419  continue; /* Wrong type */
420  if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
421  return r; /* Exact match */
422  /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
423  if (r->flags & IORESOURCE_PREFETCH)
424  continue;
425  /* .. but we can put a prefetchable resource inside a non-prefetchable one */
426  if (!best)
427  best = r;
428  }
429  return best;
430 }
431 
439 static void
440 pci_restore_bars(struct pci_dev *dev)
441 {
442  int i;
443 
444  for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
445  pci_update_resource(dev, i);
446 }
447 
448 static struct pci_platform_pm_ops *pci_platform_pm;
449 
450 int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
451 {
452  if (!ops->is_manageable || !ops->set_state || !ops->choose_state
453  || !ops->sleep_wake || !ops->can_wakeup)
454  return -EINVAL;
455  pci_platform_pm = ops;
456  return 0;
457 }
458 
459 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
460 {
461  return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
462 }
463 
464 static inline int platform_pci_set_power_state(struct pci_dev *dev,
465  pci_power_t t)
466 {
467  return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
468 }
469 
470 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
471 {
472  return pci_platform_pm ?
473  pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
474 }
475 
476 static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
477 {
478  return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
479 }
480 
481 static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
482 {
483  return pci_platform_pm ?
484  pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
485 }
486 
487 static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
488 {
489  return pci_platform_pm ?
490  pci_platform_pm->run_wake(dev, enable) : -ENODEV;
491 }
492 
506 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
507 {
508  u16 pmcsr;
509  bool need_restore = false;
510 
511  /* Check if we're already there */
512  if (dev->current_state == state)
513  return 0;
514 
515  if (!dev->pm_cap)
516  return -EIO;
517 
518  if (state < PCI_D0 || state > PCI_D3hot)
519  return -EINVAL;
520 
521  /* Validate current state:
522  * We can enter D0 from any state, but we can only go deeper
523  * into sleep if we are already in a low-power state.
524  */
525  if (state != PCI_D0 && dev->current_state <= PCI_D3cold
526  && dev->current_state > state) {
527  dev_err(&dev->dev, "invalid power transition "
528  "(from state %d to %d)\n", dev->current_state, state);
529  return -EINVAL;
530  }
531 
532  /* check if this device supports the desired state */
533  if ((state == PCI_D1 && !dev->d1_support)
534  || (state == PCI_D2 && !dev->d2_support))
535  return -EIO;
536 
537  pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
538 
539  /* If we're (effectively) in D3, force entire word to 0.
540  * This doesn't affect PME_Status, disables PME_En, and
541  * sets PowerState to 0.
542  */
543  switch (dev->current_state) {
544  case PCI_D0:
545  case PCI_D1:
546  case PCI_D2:
547  pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
548  pmcsr |= state;
549  break;
550  case PCI_D3hot:
551  case PCI_D3cold:
552  case PCI_UNKNOWN: /* Boot-up */
553  if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
554  && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
555  need_restore = true;
556  /* Fall-through: force to D0 */
557  default:
558  pmcsr = 0;
559  break;
560  }
561 
562  /* enter specified state */
563  pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
564 
565  /* Mandatory power management transition delays */
566  /* see PCI PM 1.1 5.6.1 table 18 */
567  if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
568  pci_dev_d3_sleep(dev);
569  else if (state == PCI_D2 || dev->current_state == PCI_D2)
570  udelay(PCI_PM_D2_DELAY);
571 
572  pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
573  dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
574  if (dev->current_state != state && printk_ratelimit())
575  dev_info(&dev->dev, "Refused to change power state, "
576  "currently in D%d\n", dev->current_state);
577 
578  /*
579  * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
580  * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
581  * from D3hot to D0 _may_ perform an internal reset, thereby
582  * going to "D0 Uninitialized" rather than "D0 Initialized".
583  * For example, at least some versions of the 3c905B and the
584  * 3c556B exhibit this behaviour.
585  *
586  * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
587  * devices in a D3hot state at boot. Consequently, we need to
588  * restore at least the BARs so that the device will be
589  * accessible to its driver.
590  */
591  if (need_restore)
592  pci_restore_bars(dev);
593 
594  if (dev->bus->self)
595  pcie_aspm_pm_state_change(dev->bus->self);
596 
597  return 0;
598 }
599 
606 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
607 {
608  if (dev->pm_cap) {
609  u16 pmcsr;
610 
611  /*
612  * Configuration space is not accessible for device in
613  * D3cold, so just keep or set D3cold for safety
614  */
615  if (dev->current_state == PCI_D3cold)
616  return;
617  if (state == PCI_D3cold) {
618  dev->current_state = PCI_D3cold;
619  return;
620  }
621  pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
622  dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
623  } else {
624  dev->current_state = state;
625  }
626 }
627 
632 void pci_power_up(struct pci_dev *dev)
633 {
634  if (platform_pci_power_manageable(dev))
635  platform_pci_set_power_state(dev, PCI_D0);
636 
637  pci_raw_set_power_state(dev, PCI_D0);
638  pci_update_current_state(dev, PCI_D0);
639 }
640 
646 static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
647 {
648  int error;
649 
650  if (platform_pci_power_manageable(dev)) {
651  error = platform_pci_set_power_state(dev, state);
652  if (!error)
653  pci_update_current_state(dev, state);
654  /* Fall back to PCI_D0 if native PM is not supported */
655  if (!dev->pm_cap)
656  dev->current_state = PCI_D0;
657  } else {
658  error = -ENODEV;
659  /* Fall back to PCI_D0 if native PM is not supported */
660  if (!dev->pm_cap)
661  dev->current_state = PCI_D0;
662  }
663 
664  return error;
665 }
666 
672 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
673 {
674  if (state == PCI_D0) {
675  pci_platform_power_transition(dev, PCI_D0);
676  /*
677  * Mandatory power management transition delays, see
678  * PCI Express Base Specification Revision 2.0 Section
679  * 6.6.1: Conventional Reset. Do not delay for
680  * devices powered on/off by the corresponding bridge,
681  * because we have already delayed for the bridge.
682  */
683  if (dev->runtime_d3cold) {
684  msleep(dev->d3cold_delay);
685  /*
686  * When powering on a bridge from D3cold, the
687  * whole hierarchy may be powered on into
688  * D0uninitialized state, resume them to give
689  * them a chance to suspend again
690  */
691  pci_wakeup_bus(dev->subordinate);
692  }
693  }
694 }
695 
701 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
702 {
703  pci_power_t state = *(pci_power_t *)data;
704 
705  dev->current_state = state;
706  return 0;
707 }
708 
714 static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
715 {
716  if (bus)
717  pci_walk_bus(bus, __pci_dev_set_current_state, &state);
718 }
719 
727 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
728 {
729  int ret;
730 
731  if (state <= PCI_D0)
732  return -EINVAL;
733  ret = pci_platform_power_transition(dev, state);
734  /* Powering off a bridge may power off the whole hierarchy below it */
735  if (!ret && state == PCI_D3cold)
736  __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
737  return ret;
738 }
740 
756 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
757 {
758  int error;
759 
760  /* bound the state we're entering */
761  if (state > PCI_D3cold)
762  state = PCI_D3cold;
763  else if (state < PCI_D0)
764  state = PCI_D0;
765  else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
766  /*
767  * If the device or the parent bridge do not support PCI PM,
768  * ignore the request if we're doing anything other than putting
769  * it into D0 (which would only happen on boot).
770  */
771  return 0;
772 
773  /* Check if we're already there */
774  if (dev->current_state == state)
775  return 0;
776 
777  __pci_start_power_transition(dev, state);
778 
779  /* This device is quirked not to be put into D3, so
780  don't put it in D3 */
781  if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
782  return 0;
783 
784  /*
785  * To put device in D3cold, we put device into D3hot in native
786  * way, then put device into D3cold with platform ops
787  */
788  error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
789  PCI_D3hot : state);
790 
791  if (!__pci_complete_power_transition(dev, state))
792  error = 0;
793  /*
794  * When aspm_policy is "powersave" this call ensures
795  * that ASPM is configured.
796  */
797  if (!error && dev->bus->self)
798  pcie_aspm_powersave_config_link(dev->bus->self);
799 
800  return error;
801 }
802 
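A typical caller is a legacy .suspend hook; a minimal sketch (hypothetical foo_suspend(), not part of this file) saves config space and then uses pci_choose_state()/pci_set_power_state() to enter the platform-chosen low-power state:

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	/* usually D3hot for suspend/hibernate, see pci_choose_state() below */
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
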
813 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
814 {
815  pci_power_t ret;
816 
817  if (!pci_find_capability(dev, PCI_CAP_ID_PM))
818  return PCI_D0;
819 
820  ret = platform_pci_choose_state(dev);
821  if (ret != PCI_POWER_ERROR)
822  return ret;
823 
824  switch (state.event) {
825  case PM_EVENT_ON:
826  return PCI_D0;
827  case PM_EVENT_FREEZE:
828  case PM_EVENT_PRETHAW:
829  /* REVISIT both freeze and pre-thaw "should" use D0 */
830  case PM_EVENT_SUSPEND:
831  case PM_EVENT_HIBERNATE:
832  return PCI_D3hot;
833  default:
834  dev_info(&dev->dev, "unrecognized suspend event %d\n",
835  state.event);
836  BUG();
837  }
838  return PCI_D0;
839 }
840 
842 
843 #define PCI_EXP_SAVE_REGS 7
844 
845 
846 static struct pci_cap_saved_state *pci_find_saved_cap(
847  struct pci_dev *pci_dev, char cap)
848 {
849  struct pci_cap_saved_state *tmp;
850  struct hlist_node *pos;
851 
852  hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
853  if (tmp->cap.cap_nr == cap)
854  return tmp;
855  }
856  return NULL;
857 }
858 
859 static int pci_save_pcie_state(struct pci_dev *dev)
860 {
861  int i = 0;
862  struct pci_cap_saved_state *save_state;
863  u16 *cap;
864 
865  if (!pci_is_pcie(dev))
866  return 0;
867 
868  save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
869  if (!save_state) {
870  dev_err(&dev->dev, "buffer not found in %s\n", __func__);
871  return -ENOMEM;
872  }
873 
874  cap = (u16 *)&save_state->cap.data[0];
875  pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
876  pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
877  pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
878  pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
879  pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
880  pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
881  pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
882 
883  return 0;
884 }
885 
886 static void pci_restore_pcie_state(struct pci_dev *dev)
887 {
888  int i = 0;
889  struct pci_cap_saved_state *save_state;
890  u16 *cap;
891 
892  save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
893  if (!save_state)
894  return;
895 
896  cap = (u16 *)&save_state->cap.data[0];
897  pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
898  pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
899  pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
900  pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
901  pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
902  pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
903  pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
904 }
905 
906 
907 static int pci_save_pcix_state(struct pci_dev *dev)
908 {
909  int pos;
910  struct pci_cap_saved_state *save_state;
911 
912  pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
913  if (pos <= 0)
914  return 0;
915 
916  save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
917  if (!save_state) {
918  dev_err(&dev->dev, "buffer not found in %s\n", __func__);
919  return -ENOMEM;
920  }
921 
922  pci_read_config_word(dev, pos + PCI_X_CMD,
923  (u16 *)save_state->cap.data);
924 
925  return 0;
926 }
927 
928 static void pci_restore_pcix_state(struct pci_dev *dev)
929 {
930  int i = 0, pos;
931  struct pci_cap_saved_state *save_state;
932  u16 *cap;
933 
934  save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
935  pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
936  if (!save_state || pos <= 0)
937  return;
938  cap = (u16 *)&save_state->cap.data[0];
939 
940  pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
941 }
942 
943 
948 int
949 pci_save_state(struct pci_dev *dev)
950 {
951  int i;
952  /* XXX: 100% dword access ok here? */
953  for (i = 0; i < 16; i++)
954  pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
955  dev->state_saved = true;
956  if ((i = pci_save_pcie_state(dev)) != 0)
957  return i;
958  if ((i = pci_save_pcix_state(dev)) != 0)
959  return i;
960  return 0;
961 }
962 
963 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
964  u32 saved_val, int retry)
965 {
966  u32 val;
967 
968  pci_read_config_dword(pdev, offset, &val);
969  if (val == saved_val)
970  return;
971 
972  for (;;) {
973  dev_dbg(&pdev->dev, "restoring config space at offset "
974  "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
975  pci_write_config_dword(pdev, offset, saved_val);
976  if (retry-- <= 0)
977  return;
978 
979  pci_read_config_dword(pdev, offset, &val);
980  if (val == saved_val)
981  return;
982 
983  mdelay(1);
984  }
985 }
986 
987 static void pci_restore_config_space_range(struct pci_dev *pdev,
988  int start, int end, int retry)
989 {
990  int index;
991 
992  for (index = end; index >= start; index--)
993  pci_restore_config_dword(pdev, 4 * index,
994  pdev->saved_config_space[index],
995  retry);
996 }
997 
998 static void pci_restore_config_space(struct pci_dev *pdev)
999 {
1000  if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1001  pci_restore_config_space_range(pdev, 10, 15, 0);
1002  /* Restore BARs before the command register. */
1003  pci_restore_config_space_range(pdev, 4, 9, 10);
1004  pci_restore_config_space_range(pdev, 0, 3, 0);
1005  } else {
1006  pci_restore_config_space_range(pdev, 0, 15, 0);
1007  }
1008 }
1009 
1014 void pci_restore_state(struct pci_dev *dev)
1015 {
1016  if (!dev->state_saved)
1017  return;
1018 
1019  /* PCI Express register must be restored first */
1020  pci_restore_pcie_state(dev);
1021  pci_restore_ats_state(dev);
1022 
1023  pci_restore_config_space(dev);
1024 
1025  pci_restore_pcix_state(dev);
1026  pci_restore_msi_state(dev);
1027  pci_restore_iov_state(dev);
1028 
1029  dev->state_saved = false;
1030 }
1031 
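The usual counterpart to pci_save_state() is a resume path that first returns the device to D0 and then restores the saved registers before re-enabling it; a minimal sketch (hypothetical foo_resume(), not part of this file):

static int foo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);	/* no-op unless state was saved earlier */
	return pci_enable_device(pdev);
}
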
1032 struct pci_saved_state {
1033  u32 config_space[16];
1034  struct pci_cap_saved_data cap[0];
1035 };
1036 
1044 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1045 {
1046  struct pci_saved_state *state;
1047  struct pci_cap_saved_state *tmp;
1048  struct pci_cap_saved_data *cap;
1049  struct hlist_node *pos;
1050  size_t size;
1051 
1052  if (!dev->state_saved)
1053  return NULL;
1054 
1055  size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1056 
1057  hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1058  size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1059 
1060  state = kzalloc(size, GFP_KERNEL);
1061  if (!state)
1062  return NULL;
1063 
1064  memcpy(state->config_space, dev->saved_config_space,
1065  sizeof(state->config_space));
1066 
1067  cap = state->cap;
1068  hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1069  size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1070  memcpy(cap, &tmp->cap, len);
1071  cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1072  }
1073  /* Empty cap_save terminates list */
1074 
1075  return state;
1076 }
1078 
1084 int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1085 {
1086  struct pci_cap_saved_data *cap;
1087 
1088  dev->state_saved = false;
1089 
1090  if (!state)
1091  return 0;
1092 
1093  memcpy(dev->saved_config_space, state->config_space,
1094  sizeof(state->config_space));
1095 
1096  cap = state->cap;
1097  while (cap->size) {
1098  struct pci_cap_saved_state *tmp;
1099 
1100  tmp = pci_find_saved_cap(dev, cap->cap_nr);
1101  if (!tmp || tmp->cap.size != cap->size)
1102  return -EINVAL;
1103 
1104  memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1105  cap = (struct pci_cap_saved_data *)((u8 *)cap +
1106  sizeof(struct pci_cap_saved_data) + cap->size);
1107  }
1108 
1109  dev->state_saved = true;
1110  return 0;
1111 }
1113 
1120 int pci_load_and_free_saved_state(struct pci_dev *dev,
1121  struct pci_saved_state **state)
1122 {
1123  int ret = pci_load_saved_state(dev, *state);
1124  kfree(*state);
1125  *state = NULL;
1126  return ret;
1127 }
1129 
1130 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1131 {
1132  int err;
1133 
1134  err = pci_set_power_state(dev, PCI_D0);
1135  if (err < 0 && err != -EIO)
1136  return err;
1137  err = pcibios_enable_device(dev, bars);
1138  if (err < 0)
1139  return err;
1140  pci_fixup_device(pci_fixup_enable, dev);
1141 
1142  return 0;
1143 }
1144 
1152 int pci_reenable_device(struct pci_dev *dev)
1153 {
1154  if (pci_is_enabled(dev))
1155  return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1156  return 0;
1157 }
1158 
1159 static int __pci_enable_device_flags(struct pci_dev *dev,
1160  resource_size_t flags)
1161 {
1162  int err;
1163  int i, bars = 0;
1164 
1165  /*
1166  * Power state could be unknown at this point, either due to a fresh
1167  * boot or a device removal call. So get the current power state
1168  * so that things like MSI message writing will behave as expected
1169  * (e.g. if the device really is in D0 at enable time).
1170  */
1171  if (dev->pm_cap) {
1172  u16 pmcsr;
1173  pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1174  dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1175  }
1176 
1177  if (atomic_add_return(1, &dev->enable_cnt) > 1)
1178  return 0; /* already enabled */
1179 
1180  /* only skip sriov related */
1181  for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1182  if (dev->resource[i].flags & flags)
1183  bars |= (1 << i);
1184  for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1185  if (dev->resource[i].flags & flags)
1186  bars |= (1 << i);
1187 
1188  err = do_pci_enable_device(dev, bars);
1189  if (err < 0)
1190  atomic_dec(&dev->enable_cnt);
1191  return err;
1192 }
1193 
1202 int pci_enable_device_io(struct pci_dev *dev)
1203 {
1204  return __pci_enable_device_flags(dev, IORESOURCE_IO);
1205 }
1206 
1215 int pci_enable_device_mem(struct pci_dev *dev)
1216 {
1217  return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1218 }
1219 
1231 int pci_enable_device(struct pci_dev *dev)
1232 {
1233  return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1234 }
1235 
1236 /*
1237  * Managed PCI resources. This manages device on/off, intx/msi/msix
1238  * on/off and BAR regions. pci_dev itself records msi/msix status, so
1239  * there's no need to track it separately. pci_devres is initialized
1240  * when a device is enabled using managed PCI device enable interface.
1241  */
1242 struct pci_devres {
1243  unsigned int enabled:1;
1244  unsigned int pinned:1;
1245  unsigned int orig_intx:1;
1246  unsigned int restore_intx:1;
1247  u32 region_mask;
1248 };
1249 
1250 static void pcim_release(struct device *gendev, void *res)
1251 {
1252  struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1253  struct pci_devres *this = res;
1254  int i;
1255 
1256  if (dev->msi_enabled)
1257  pci_disable_msi(dev);
1258  if (dev->msix_enabled)
1259  pci_disable_msix(dev);
1260 
1261  for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1262  if (this->region_mask & (1 << i))
1263  pci_release_region(dev, i);
1264 
1265  if (this->restore_intx)
1266  pci_intx(dev, this->orig_intx);
1267 
1268  if (this->enabled && !this->pinned)
1269  pci_disable_device(dev);
1270 }
1271 
1272 static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1273 {
1274  struct pci_devres *dr, *new_dr;
1275 
1276  dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1277  if (dr)
1278  return dr;
1279 
1280  new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1281  if (!new_dr)
1282  return NULL;
1283  return devres_get(&pdev->dev, new_dr, NULL, NULL);
1284 }
1285 
1286 static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1287 {
1288  if (pci_is_managed(pdev))
1289  return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1290  return NULL;
1291 }
1292 
1299 int pcim_enable_device(struct pci_dev *pdev)
1300 {
1301  struct pci_devres *dr;
1302  int rc;
1303 
1304  dr = get_pci_dr(pdev);
1305  if (unlikely(!dr))
1306  return -ENOMEM;
1307  if (dr->enabled)
1308  return 0;
1309 
1310  rc = pci_enable_device(pdev);
1311  if (!rc) {
1312  pdev->is_managed = 1;
1313  dr->enabled = 1;
1314  }
1315  return rc;
1316 }
1317 
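A minimal managed probe sketch (hypothetical foo_probe(), not part of this file): because pcim_enable_device() registers a devres release, the device is disabled again automatically when the driver detaches, so no explicit pci_disable_device() is needed in the error or remove paths:

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);
	/* further setup; nothing to undo for the enable itself */
	return 0;
}
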
1326 void pcim_pin_device(struct pci_dev *pdev)
1327 {
1328  struct pci_devres *dr;
1329 
1330  dr = find_pci_dr(pdev);
1331  WARN_ON(!dr || !dr->enabled);
1332  if (dr)
1333  dr->pinned = 1;
1334 }
1335 
1345 
1346 static void do_pci_disable_device(struct pci_dev *dev)
1347 {
1348  u16 pci_command;
1349 
1350  pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1351  if (pci_command & PCI_COMMAND_MASTER) {
1352  pci_command &= ~PCI_COMMAND_MASTER;
1353  pci_write_config_word(dev, PCI_COMMAND, pci_command);
1354  }
1355 
1356  pcibios_disable_device(dev);
1357 }
1358 
1366 void pci_disable_enabled_device(struct pci_dev *dev)
1367 {
1368  if (pci_is_enabled(dev))
1369  do_pci_disable_device(dev);
1370 }
1371 
1382 void
1383 pci_disable_device(struct pci_dev *dev)
1384 {
1385  struct pci_devres *dr;
1386 
1387  dr = find_pci_dr(dev);
1388  if (dr)
1389  dr->enabled = 0;
1390 
1391  if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1392  return;
1393 
1394  do_pci_disable_device(dev);
1395 
1396  dev->is_busmaster = 0;
1397 }
1398 
1408 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1409  enum pcie_reset_state state)
1410 {
1411  return -EINVAL;
1412 }
1413 
1422 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1423 {
1424  return pcibios_set_pcie_reset_state(dev, state);
1425 }
1426 
1435 bool pci_check_pme_status(struct pci_dev *dev)
1436 {
1437  int pmcsr_pos;
1438  u16 pmcsr;
1439  bool ret = false;
1440 
1441  if (!dev->pm_cap)
1442  return false;
1443 
1444  pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1445  pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1446  if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1447  return false;
1448 
1449  /* Clear PME status. */
1450  pmcsr |= PCI_PM_CTRL_PME_STATUS;
1451  if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1452  /* Disable PME to avoid interrupt flood. */
1453  pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1454  ret = true;
1455  }
1456 
1457  pci_write_config_word(dev, pmcsr_pos, pmcsr);
1458 
1459  return ret;
1460 }
1461 
1470 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1471 {
1472  if (pme_poll_reset && dev->pme_poll)
1473  dev->pme_poll = false;
1474 
1475  if (pci_check_pme_status(dev)) {
1476  pci_wakeup_event(dev);
1477  pm_request_resume(&dev->dev);
1478  }
1479  return 0;
1480 }
1481 
1486 void pci_pme_wakeup_bus(struct pci_bus *bus)
1487 {
1488  if (bus)
1489  pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1490 }
1491 
1497 static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
1498 {
1499  pci_wakeup_event(pci_dev);
1500  pm_request_resume(&pci_dev->dev);
1501  return 0;
1502 }
1503 
1508 void pci_wakeup_bus(struct pci_bus *bus)
1509 {
1510  if (bus)
1511  pci_walk_bus(bus, pci_wakeup, NULL);
1512 }
1513 
1519 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1520 {
1521  if (!dev->pm_cap)
1522  return false;
1523 
1524  return !!(dev->pme_support & (1 << state));
1525 }
1526 
1527 static void pci_pme_list_scan(struct work_struct *work)
1528 {
1529  struct pci_pme_device *pme_dev, *n;
1530 
1531  mutex_lock(&pci_pme_list_mutex);
1532  if (!list_empty(&pci_pme_list)) {
1533  list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1534  if (pme_dev->dev->pme_poll) {
1535  struct pci_dev *bridge;
1536 
1537  bridge = pme_dev->dev->bus->self;
1538  /*
1539  * If bridge is in low power state, the
1540  * configuration space of subordinate devices
1541  * may not be accessible
1542  */
1543  if (bridge && bridge->current_state != PCI_D0)
1544  continue;
1545  pci_pme_wakeup(pme_dev->dev, NULL);
1546  } else {
1547  list_del(&pme_dev->list);
1548  kfree(pme_dev);
1549  }
1550  }
1551  if (!list_empty(&pci_pme_list))
1552  schedule_delayed_work(&pci_pme_work,
1553  msecs_to_jiffies(PME_TIMEOUT));
1554  }
1555  mutex_unlock(&pci_pme_list_mutex);
1556 }
1557 
1566 void pci_pme_active(struct pci_dev *dev, bool enable)
1567 {
1568  u16 pmcsr;
1569 
1570  if (!dev->pm_cap)
1571  return;
1572 
1573  pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1574  /* Clear PME_Status by writing 1 to it and enable PME# */
1575  pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1576  if (!enable)
1577  pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1578 
1579  pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1580 
1581  /* PCI (as opposed to PCIe) PME requires that the device have
1582  its PME# line hooked up correctly. Not all hardware vendors
1583  do this, so the PME never gets delivered and the device
1584  remains asleep. The easiest way around this is to
1585  periodically walk the list of suspended devices and check
1586  whether any have their PME flag set. The assumption is that
1587  we'll wake up often enough anyway that this won't be a huge
1588  hit, and the power savings from the devices will still be a
1589  win. */
1590 
1591  if (dev->pme_poll) {
1592  struct pci_pme_device *pme_dev;
1593  if (enable) {
1594  pme_dev = kmalloc(sizeof(struct pci_pme_device),
1595  GFP_KERNEL);
1596  if (!pme_dev)
1597  goto out;
1598  pme_dev->dev = dev;
1599  mutex_lock(&pci_pme_list_mutex);
1600  list_add(&pme_dev->list, &pci_pme_list);
1601  if (list_is_singular(&pci_pme_list))
1602  schedule_delayed_work(&pci_pme_work,
1603  msecs_to_jiffies(PME_TIMEOUT));
1604  mutex_unlock(&pci_pme_list_mutex);
1605  } else {
1606  mutex_lock(&pci_pme_list_mutex);
1607  list_for_each_entry(pme_dev, &pci_pme_list, list) {
1608  if (pme_dev->dev == dev) {
1609  list_del(&pme_dev->list);
1610  kfree(pme_dev);
1611  break;
1612  }
1613  }
1614  mutex_unlock(&pci_pme_list_mutex);
1615  }
1616  }
1617 
1618 out:
1619  dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1620 }
1621 
1642 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1643  bool runtime, bool enable)
1644 {
1645  int ret = 0;
1646 
1647  if (enable && !runtime && !device_may_wakeup(&dev->dev))
1648  return -EINVAL;
1649 
1650  /* Don't do the same thing twice in a row for one device. */
1651  if (!!enable == !!dev->wakeup_prepared)
1652  return 0;
1653 
1654  /*
1655  * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1656  * Anderson we should be doing PME# wake enable followed by ACPI wake
1657  * enable. To disable wake-up we call the platform first, for symmetry.
1658  */
1659 
1660  if (enable) {
1661  int error;
1662 
1663  if (pci_pme_capable(dev, state))
1664  pci_pme_active(dev, true);
1665  else
1666  ret = 1;
1667  error = runtime ? platform_pci_run_wake(dev, true) :
1668  platform_pci_sleep_wake(dev, true);
1669  if (ret)
1670  ret = error;
1671  if (!ret)
1672  dev->wakeup_prepared = true;
1673  } else {
1674  if (runtime)
1675  platform_pci_run_wake(dev, false);
1676  else
1677  platform_pci_sleep_wake(dev, false);
1678  pci_pme_active(dev, false);
1679  dev->wakeup_prepared = false;
1680  }
1681 
1682  return ret;
1683 }
1685 
1700 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1701 {
1702  return pci_pme_capable(dev, PCI_D3cold) ?
1703  pci_enable_wake(dev, PCI_D3cold, enable) :
1704  pci_enable_wake(dev, PCI_D3hot, enable);
1705 }
1706 
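For illustration (a sketch with a hypothetical wol_enabled flag, not part of this file): a network driver's suspend path might arm or disarm wake-up depending on its Wake-on-LAN setting, letting pci_wake_from_d3() fall back to D3hot when PME# from D3cold is not supported:

	/* in the driver's suspend path, after programming WoL filters */
	pci_wake_from_d3(pdev, wol_enabled);
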
1715 pci_power_t pci_target_state(struct pci_dev *dev)
1716 {
1717  pci_power_t target_state = PCI_D3hot;
1718 
1719  if (platform_pci_power_manageable(dev)) {
1720  /*
1721  * Call the platform to choose the target state of the device
1722  * and enable wake-up from this state if supported.
1723  */
1724  pci_power_t state = platform_pci_choose_state(dev);
1725 
1726  switch (state) {
1727  case PCI_POWER_ERROR:
1728  case PCI_UNKNOWN:
1729  break;
1730  case PCI_D1:
1731  case PCI_D2:
1732  if (pci_no_d1d2(dev))
1733  break;
1734  default:
1735  target_state = state;
1736  }
1737  } else if (!dev->pm_cap) {
1738  target_state = PCI_D0;
1739  } else if (device_may_wakeup(&dev->dev)) {
1740  /*
1741  * Find the deepest state from which the device can generate
1742  * wake-up events, make it the target state and enable device
1743  * to generate PME#.
1744  */
1745  if (dev->pme_support) {
1746  while (target_state
1747  && !(dev->pme_support & (1 << target_state)))
1748  target_state--;
1749  }
1750  }
1751 
1752  return target_state;
1753 }
1754 
1763 int pci_prepare_to_sleep(struct pci_dev *dev)
1764 {
1765  pci_power_t target_state = pci_target_state(dev);
1766  int error;
1767 
1768  if (target_state == PCI_POWER_ERROR)
1769  return -EIO;
1770 
1771  /* D3cold during system suspend/hibernate is not supported */
1772  if (target_state > PCI_D3hot)
1773  target_state = PCI_D3hot;
1774 
1775  pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1776 
1777  error = pci_set_power_state(dev, target_state);
1778 
1779  if (error)
1780  pci_enable_wake(dev, target_state, false);
1781 
1782  return error;
1783 }
1784 
1791 int pci_back_from_sleep(struct pci_dev *dev)
1792 {
1793  pci_enable_wake(dev, PCI_D0, false);
1794  return pci_set_power_state(dev, PCI_D0);
1795 }
1796 
1804 int pci_finish_runtime_suspend(struct pci_dev *dev)
1805 {
1806  pci_power_t target_state = pci_target_state(dev);
1807  int error;
1808 
1809  if (target_state == PCI_POWER_ERROR)
1810  return -EIO;
1811 
1812  dev->runtime_d3cold = target_state == PCI_D3cold;
1813 
1814  __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1815 
1816  error = pci_set_power_state(dev, target_state);
1817 
1818  if (error) {
1819  __pci_enable_wake(dev, target_state, true, false);
1820  dev->runtime_d3cold = false;
1821  }
1822 
1823  return error;
1824 }
1825 
1834 bool pci_dev_run_wake(struct pci_dev *dev)
1835 {
1836  struct pci_bus *bus = dev->bus;
1837 
1838  if (device_run_wake(&dev->dev))
1839  return true;
1840 
1841  if (!dev->pme_support)
1842  return false;
1843 
1844  while (bus->parent) {
1845  struct pci_dev *bridge = bus->self;
1846 
1847  if (device_run_wake(&bridge->dev))
1848  return true;
1849 
1850  bus = bus->parent;
1851  }
1852 
1853  /* We have reached the root bus. */
1854  if (bus->bridge)
1855  return device_run_wake(bus->bridge);
1856 
1857  return false;
1858 }
1860 
1861 void pci_config_pm_runtime_get(struct pci_dev *pdev)
1862 {
1863  struct device *dev = &pdev->dev;
1864  struct device *parent = dev->parent;
1865 
1866  if (parent)
1867  pm_runtime_get_sync(parent);
1868  pm_runtime_get_noresume(dev);
1869  /*
1870  * pdev->current_state is set to PCI_D3cold during suspending,
1871  * so wait until suspending completes
1872  */
1873  pm_runtime_barrier(dev);
1874  /*
1875  * Only need to resume devices in D3cold, because config
1876  * registers are still accessible for devices suspended but
1877  * not in D3cold.
1878  */
1879  if (pdev->current_state == PCI_D3cold)
1880  pm_runtime_resume(dev);
1881 }
1882 
1883 void pci_config_pm_runtime_put(struct pci_dev *pdev)
1884 {
1885  struct device *dev = &pdev->dev;
1886  struct device *parent = dev->parent;
1887 
1888  pm_runtime_put(dev);
1889  if (parent)
1890  pm_runtime_put_sync(parent);
1891 }
1892 
1897 void pci_pm_init(struct pci_dev *dev)
1898 {
1899  int pm;
1900  u16 pmc;
1901 
1902  pm_runtime_forbid(&dev->dev);
1903  device_enable_async_suspend(&dev->dev);
1904  dev->wakeup_prepared = false;
1905 
1906  dev->pm_cap = 0;
1907 
1908  /* find PCI PM capability in list */
1909  pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1910  if (!pm)
1911  return;
1912  /* Check device's ability to generate PME# */
1913  pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1914 
1915  if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1916  dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1917  pmc & PCI_PM_CAP_VER_MASK);
1918  return;
1919  }
1920 
1921  dev->pm_cap = pm;
1922  dev->d3_delay = PCI_PM_D3_WAIT;
1923  dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
1924  dev->d3cold_allowed = true;
1925 
1926  dev->d1_support = false;
1927  dev->d2_support = false;
1928  if (!pci_no_d1d2(dev)) {
1929  if (pmc & PCI_PM_CAP_D1)
1930  dev->d1_support = true;
1931  if (pmc & PCI_PM_CAP_D2)
1932  dev->d2_support = true;
1933 
1934  if (dev->d1_support || dev->d2_support)
1935  dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1936  dev->d1_support ? " D1" : "",
1937  dev->d2_support ? " D2" : "");
1938  }
1939 
1940  pmc &= PCI_PM_CAP_PME_MASK;
1941  if (pmc) {
1942  dev_printk(KERN_DEBUG, &dev->dev,
1943  "PME# supported from%s%s%s%s%s\n",
1944  (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1945  (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1946  (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1947  (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1948  (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1949  dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1950  dev->pme_poll = true;
1951  /*
1952  * Make device's PM flags reflect the wake-up capability, but
1953  * let the user space enable it to wake up the system as needed.
1954  */
1955  device_set_wakeup_capable(&dev->dev, true);
1956  /* Disable the PME# generation functionality */
1957  pci_pme_active(dev, false);
1958  } else {
1959  dev->pme_support = 0;
1960  }
1961 }
1962 
1973 void platform_pci_wakeup_init(struct pci_dev *dev)
1974 {
1975  if (!platform_pci_can_wakeup(dev))
1976  return;
1977 
1978  device_set_wakeup_capable(&dev->dev, true);
1979  platform_pci_sleep_wake(dev, false);
1980 }
1981 
1982 static void pci_add_saved_cap(struct pci_dev *pci_dev,
1983  struct pci_cap_saved_state *new_cap)
1984 {
1985  hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1986 }
1987 
1994 static int pci_add_cap_save_buffer(
1995  struct pci_dev *dev, char cap, unsigned int size)
1996 {
1997  int pos;
1998  struct pci_cap_saved_state *save_state;
1999 
2000  pos = pci_find_capability(dev, cap);
2001  if (pos <= 0)
2002  return 0;
2003 
2004  save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2005  if (!save_state)
2006  return -ENOMEM;
2007 
2008  save_state->cap.cap_nr = cap;
2009  save_state->cap.size = size;
2010  pci_add_saved_cap(dev, save_state);
2011 
2012  return 0;
2013 }
2014 
2019 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2020 {
2021  int error;
2022 
2023  error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2024  PCI_EXP_SAVE_REGS * sizeof(u16));
2025  if (error)
2026  dev_err(&dev->dev,
2027  "unable to preallocate PCI Express save buffer\n");
2028 
2029  error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2030  if (error)
2031  dev_err(&dev->dev,
2032  "unable to preallocate PCI-X save buffer\n");
2033 }
2034 
2035 void pci_free_cap_save_buffers(struct pci_dev *dev)
2036 {
2037  struct pci_cap_saved_state *tmp;
2038  struct hlist_node *pos, *n;
2039 
2040  hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
2041  kfree(tmp);
2042 }
2043 
2048 void pci_enable_ari(struct pci_dev *dev)
2049 {
2050  u32 cap;
2051  struct pci_dev *bridge;
2052 
2053  if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2054  return;
2055 
2056  if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI))
2057  return;
2058 
2059  bridge = dev->bus->self;
2060  if (!bridge)
2061  return;
2062 
2063  pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2064  if (!(cap & PCI_EXP_DEVCAP2_ARI))
2065  return;
2066 
2067  pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI);
2068  bridge->ari_enabled = 1;
2069 }
2070 
2080 void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2081 {
2082  u16 ctrl = 0;
2083 
2084  if (type & PCI_EXP_IDO_REQUEST)
2085  ctrl |= PCI_EXP_IDO_REQ_EN;
2086  if (type & PCI_EXP_IDO_COMPLETION)
2087  ctrl |= PCI_EXP_IDO_CMP_EN;
2088  if (ctrl)
2089  pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
2090 }
2092 
2098 void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2099 {
2100  u16 ctrl = 0;
2101 
2102  if (type & PCI_EXP_IDO_REQUEST)
2103  ctrl |= PCI_EXP_IDO_REQ_EN;
2104  if (type & PCI_EXP_IDO_COMPLETION)
2105  ctrl |= PCI_EXP_IDO_CMP_EN;
2106  if (ctrl)
2107  pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
2108 }
2110 
2130 int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2131 {
2132  u32 cap;
2133  u16 ctrl;
2134  int ret;
2135 
2136  pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
2137  if (!(cap & PCI_EXP_OBFF_MASK))
2138  return -ENOTSUPP; /* no OBFF support at all */
2139 
2140  /* Make sure the topology supports OBFF as well */
2141  if (dev->bus->self) {
2142  ret = pci_enable_obff(dev->bus->self, type);
2143  if (ret)
2144  return ret;
2145  }
2146 
2147  pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
2148  if (cap & PCI_EXP_OBFF_WAKE)
2149  ctrl |= PCI_EXP_OBFF_WAKE_EN;
2150  else {
2151  switch (type) {
2152  case PCI_EXP_OBFF_SIGNAL_L0:
2153  if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2154  ctrl |= PCI_EXP_OBFF_MSGA_EN;
2155  break;
2156  case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2157  ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2158  ctrl |= PCI_EXP_OBFF_MSGB_EN;
2159  break;
2160  default:
2161  WARN(1, "bad OBFF signal type\n");
2162  return -ENOTSUPP;
2163  }
2164  }
2165  pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl);
2166 
2167  return 0;
2168 }
2170 
2177 void pci_disable_obff(struct pci_dev *dev)
2178 {
2179  pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
2180 }
2182 
2190 static bool pci_ltr_supported(struct pci_dev *dev)
2191 {
2192  u32 cap;
2193 
2194  pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
2195 
2196  return cap & PCI_EXP_DEVCAP2_LTR;
2197 }
2198 
2209 int pci_enable_ltr(struct pci_dev *dev)
2210 {
2211  int ret;
2212 
2213  /* Only primary function can enable/disable LTR */
2214  if (PCI_FUNC(dev->devfn) != 0)
2215  return -EINVAL;
2216 
2217  if (!pci_ltr_supported(dev))
2218  return -ENOTSUPP;
2219 
2220  /* Enable upstream ports first */
2221  if (dev->bus->self) {
2222  ret = pci_enable_ltr(dev->bus->self);
2223  if (ret)
2224  return ret;
2225  }
2226 
2227  return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
2228 }
2230 
2235 void pci_disable_ltr(struct pci_dev *dev)
2236 {
2237  /* Only primary function can enable/disable LTR */
2238  if (PCI_FUNC(dev->devfn) != 0)
2239  return;
2240 
2241  if (!pci_ltr_supported(dev))
2242  return;
2243 
2244  pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
2245 }
2247 
2248 static int __pci_ltr_scale(int *val)
2249 {
2250  int scale = 0;
2251 
2252  while (*val > 1023) {
2253  *val = (*val + 31) / 32;
2254  scale++;
2255  }
2256  return scale;
2257 }
2258 
2267 int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2268 {
2269  int pos, ret, snoop_scale, nosnoop_scale;
2270  u16 val;
2271 
2272  if (!pci_ltr_supported(dev))
2273  return -ENOTSUPP;
2274 
2275  snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2276  nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2277 
2278  if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2279  nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2280  return -EINVAL;
2281 
2282  if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2283  (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2284  return -EINVAL;
2285 
2286  pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2287  if (!pos)
2288  return -ENOTSUPP;
2289 
2290  val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2291  ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2292  if (ret != 4)
2293  return -EIO;
2294 
2295  val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2296  ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2297  if (ret != 4)
2298  return -EIO;
2299 
2300  return 0;
2301 }
2303 
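Worked example of the encoding above: requesting snoop_lat_ns = 100000 (100 us), __pci_ltr_scale() divides by 32 (rounding up) until the value fits in 10 bits: 100000 -> 3125 -> 98, giving scale 2. The register value written is then (2 << PCI_LTR_SCALE_SHIFT) | 98, which the device interprets as 98 * 1024 ns = 100352 ns, i.e. the request rounded up to the next representable latency.
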
2304 static int pci_acs_enable;
2305 
2309 void pci_request_acs(void)
2310 {
2311  pci_acs_enable = 1;
2312 }
2313 
2318 void pci_enable_acs(struct pci_dev *dev)
2319 {
2320  int pos;
2321  u16 cap;
2322  u16 ctrl;
2323 
2324  if (!pci_acs_enable)
2325  return;
2326 
2327  pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2328  if (!pos)
2329  return;
2330 
2331  pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2332  pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2333 
2334  /* Source Validation */
2335  ctrl |= (cap & PCI_ACS_SV);
2336 
2337  /* P2P Request Redirect */
2338  ctrl |= (cap & PCI_ACS_RR);
2339 
2340  /* P2P Completion Redirect */
2341  ctrl |= (cap & PCI_ACS_CR);
2342 
2343  /* Upstream Forwarding */
2344  ctrl |= (cap & PCI_ACS_UF);
2345 
2346  pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2347 }
2348 
2357 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2358 {
2359  int pos, ret;
2360  u16 ctrl;
2361 
2362  ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2363  if (ret >= 0)
2364  return ret > 0;
2365 
2366  if (!pci_is_pcie(pdev))
2367  return false;
2368 
2369  /* Filter out flags not applicable to multifunction */
2370  if (pdev->multifunction)
2371  acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
2372  PCI_ACS_EC | PCI_ACS_DT);
2373 
2374  if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
2375  pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
2376  pdev->multifunction) {
2377  pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2378  if (!pos)
2379  return false;
2380 
2381  pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2382  if ((ctrl & acs_flags) != acs_flags)
2383  return false;
2384  }
2385 
2386  return true;
2387 }
2388 
2398 bool pci_acs_path_enabled(struct pci_dev *start,
2399  struct pci_dev *end, u16 acs_flags)
2400 {
2401  struct pci_dev *pdev, *parent = start;
2402 
2403  do {
2404  pdev = parent;
2405 
2406  if (!pci_acs_enabled(pdev, acs_flags))
2407  return false;
2408 
2409  if (pci_is_root_bus(pdev->bus))
2410  return (end == NULL);
2411 
2412  parent = pdev->bus->self;
2413  } while (pdev != end);
2414 
2415  return true;
2416 }
2417 
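For illustration (a minimal sketch, not taken from this file): an IOMMU- or VFIO-style caller can ask whether peer-to-peer traffic from a device is always redirected upstream on its way to the root, which is the basis for treating the device as isolatable:

	bool isolated;

	/* every bridge from dev up to the root must redirect P2P requests
	 * and completions and forward them upstream */
	isolated = pci_acs_path_enabled(dev, NULL,
					PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
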
2429 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2430 {
2431  int slot;
2432 
2433  if (pci_ari_enabled(dev->bus))
2434  slot = 0;
2435  else
2436  slot = PCI_SLOT(dev->devfn);
2437 
2438  return (((pin - 1) + slot) % 4) + 1;
2439 }
2440 
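Worked example of the swizzle above: a device in slot 1 behind a PCI-PCI bridge that asserts INTA (pin 1) appears at the bridge as ((1 - 1) + 1) % 4 + 1 = 2, i.e. INTB; the same device's INTD (pin 4) maps to ((4 - 1) + 1) % 4 + 1 = 1, i.e. INTA. Applying this once per bridge level, as pci_common_swizzle() does below, yields the pin seen at the root bus.
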
2441 int
2442 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2443 {
2444  u8 pin;
2445 
2446  pin = dev->pin;
2447  if (!pin)
2448  return -1;
2449 
2450  while (!pci_is_root_bus(dev->bus)) {
2451  pin = pci_swizzle_interrupt_pin(dev, pin);
2452  dev = dev->bus->self;
2453  }
2454  *bridge = dev;
2455  return pin;
2456 }
2457 
2466 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2467 {
2468  u8 pin = *pinp;
2469 
2470  while (!pci_is_root_bus(dev->bus)) {
2471  pin = pci_swizzle_interrupt_pin(dev, pin);
2472  dev = dev->bus->self;
2473  }
2474  *pinp = pin;
2475  return PCI_SLOT(dev->devfn);
2476 }
2477 
2487 void pci_release_region(struct pci_dev *pdev, int bar)
2488 {
2489  struct pci_devres *dr;
2490 
2491  if (pci_resource_len(pdev, bar) == 0)
2492  return;
2493  if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2494  release_region(pci_resource_start(pdev, bar),
2495  pci_resource_len(pdev, bar));
2496  else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2497  release_mem_region(pci_resource_start(pdev, bar),
2498  pci_resource_len(pdev, bar));
2499 
2500  dr = find_pci_dr(pdev);
2501  if (dr)
2502  dr->region_mask &= ~(1 << bar);
2503 }
2504 
2524 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2525  int exclusive)
2526 {
2527  struct pci_devres *dr;
2528 
2529  if (pci_resource_len(pdev, bar) == 0)
2530  return 0;
2531 
2532  if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2533  if (!request_region(pci_resource_start(pdev, bar),
2534  pci_resource_len(pdev, bar), res_name))
2535  goto err_out;
2536  }
2537  else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2538  if (!__request_mem_region(pci_resource_start(pdev, bar),
2539  pci_resource_len(pdev, bar), res_name,
2540  exclusive))
2541  goto err_out;
2542  }
2543 
2544  dr = find_pci_dr(pdev);
2545  if (dr)
2546  dr->region_mask |= 1 << bar;
2547 
2548  return 0;
2549 
2550 err_out:
2551  dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2552  &pdev->resource[bar]);
2553  return -EBUSY;
2554 }
2555 
2570 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2571 {
2572  return __pci_request_region(pdev, bar, res_name, 0);
2573 }
2574 
2593 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2594 {
2595  return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2596 }
2605 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2606 {
2607  int i;
2608 
2609  for (i = 0; i < 6; i++)
2610  if (bars & (1 << i))
2611  pci_release_region(pdev, i);
2612 }
2613 
2614 int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2615  const char *res_name, int excl)
2616 {
2617  int i;
2618 
2619  for (i = 0; i < 6; i++)
2620  if (bars & (1 << i))
2621  if (__pci_request_region(pdev, i, res_name, excl))
2622  goto err_out;
2623  return 0;
2624 
2625 err_out:
2626  while(--i >= 0)
2627  if (bars & (1 << i))
2628  pci_release_region(pdev, i);
2629 
2630  return -EBUSY;
2631 }
2632 
2633 
2640 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2641  const char *res_name)
2642 {
2643  return __pci_request_selected_regions(pdev, bars, res_name, 0);
2644 }
2645 
2646 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2647  int bars, const char *res_name)
2648 {
2649  return __pci_request_selected_regions(pdev, bars, res_name,
2650  IORESOURCE_EXCLUSIVE);
2651 }
2652 
2662 void pci_release_regions(struct pci_dev *pdev)
2663 {
2664  pci_release_selected_regions(pdev, (1 << 6) - 1);
2665 }
2666 
2680 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2681 {
2682  return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2683 }
2684 
2701 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2702 {
2703  return pci_request_selected_regions_exclusive(pdev,
2704  ((1 << 6) - 1), res_name);
2705 }
2706 
2707 static void __pci_set_master(struct pci_dev *dev, bool enable)
2708 {
2709  u16 old_cmd, cmd;
2710 
2711  pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2712  if (enable)
2713  cmd = old_cmd | PCI_COMMAND_MASTER;
2714  else
2715  cmd = old_cmd & ~PCI_COMMAND_MASTER;
2716  if (cmd != old_cmd) {
2717  dev_dbg(&dev->dev, "%s bus mastering\n",
2718  enable ? "enabling" : "disabling");
2719  pci_write_config_word(dev, PCI_COMMAND, cmd);
2720  }
2721  dev->is_busmaster = enable;
2722 }
2723 
2731 char * __weak pcibios_setup(char *str)
2732 {
2733  return str;
2734 }
2735 
2744 void __weak pcibios_set_master(struct pci_dev *dev)
2745 {
2746  u8 lat;
2747 
2748  /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2749  if (pci_is_pcie(dev))
2750  return;
2751 
2752  pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2753  if (lat < 16)
2754  lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2755  else if (lat > pcibios_max_latency)
2756  lat = pcibios_max_latency;
2757  else
2758  return;
2759  dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2760  pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2761 }
2762 
2770 void pci_set_master(struct pci_dev *dev)
2771 {
2772  __pci_set_master(dev, true);
2773  pcibios_set_master(dev);
2774 }
2775 
2780 void pci_clear_master(struct pci_dev *dev)
2781 {
2782  __pci_set_master(dev, false);
2783 }
2784 
2795 int pci_set_cacheline_size(struct pci_dev *dev)
2796 {
2797  u8 cacheline_size;
2798 
2799  if (!pci_cache_line_size)
2800  return -EINVAL;
2801 
2802  /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2803  equal to or multiple of the right value. */
2804  pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2805  if (cacheline_size >= pci_cache_line_size &&
2806  (cacheline_size % pci_cache_line_size) == 0)
2807  return 0;
2808 
2809  /* Write the correct value. */
2810  pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2811  /* Read it back. */
2812  pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2813  if (cacheline_size == pci_cache_line_size)
2814  return 0;
2815 
2816  dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2817  "supported\n", pci_cache_line_size << 2);
2818 
2819  return -EINVAL;
2820 }
2822 
2823 #ifdef PCI_DISABLE_MWI
2824 int pci_set_mwi(struct pci_dev *dev)
2825 {
2826  return 0;
2827 }
2828 
2829 int pci_try_set_mwi(struct pci_dev *dev)
2830 {
2831  return 0;
2832 }
2833 
2834 void pci_clear_mwi(struct pci_dev *dev)
2835 {
2836 }
2837 
2838 #else
2839 
2848 int
2849 pci_set_mwi(struct pci_dev *dev)
2850 {
2851  int rc;
2852  u16 cmd;
2853 
2854  rc = pci_set_cacheline_size(dev);
2855  if (rc)
2856  return rc;
2857 
2858  pci_read_config_word(dev, PCI_COMMAND, &cmd);
2859  if (! (cmd & PCI_COMMAND_INVALIDATE)) {
2860  dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2861  cmd |= PCI_COMMAND_INVALIDATE;
2862  pci_write_config_word(dev, PCI_COMMAND, cmd);
2863  }
2864 
2865  return 0;
2866 }
2867 
2877 int pci_try_set_mwi(struct pci_dev *dev)
2878 {
2879  int rc = pci_set_mwi(dev);
2880  return rc;
2881 }
2882 
2889 void
2890 pci_clear_mwi(struct pci_dev *dev)
2891 {
2892  u16 cmd;
2893 
2894  pci_read_config_word(dev, PCI_COMMAND, &cmd);
2895  if (cmd & PCI_COMMAND_INVALIDATE) {
2896  cmd &= ~PCI_COMMAND_INVALIDATE;
2897  pci_write_config_word(dev, PCI_COMMAND, cmd);
2898  }
2899 }
2900 #endif /* ! PCI_DISABLE_MWI */
2901 
2909 void
2910 pci_intx(struct pci_dev *pdev, int enable)
2911 {
2912  u16 pci_command, new;
2913 
2914  pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2915 
2916  if (enable) {
2917  new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2918  } else {
2919  new = pci_command | PCI_COMMAND_INTX_DISABLE;
2920  }
2921 
2922  if (new != pci_command) {
2923  struct pci_devres *dr;
2924 
2925  pci_write_config_word(pdev, PCI_COMMAND, new);
2926 
2927  dr = find_pci_dr(pdev);
2928  if (dr && !dr->restore_intx) {
2929  dr->restore_intx = 1;
2930  dr->orig_intx = !enable;
2931  }
2932  }
2933 }
2934 
2942 bool pci_intx_mask_supported(struct pci_dev *dev)
2943 {
2944  bool mask_supported = false;
2945  u16 orig, new;
2946 
2947  if (dev->broken_intx_masking)
2948  return false;
2949 
2950  pci_cfg_access_lock(dev);
2951 
2952  pci_read_config_word(dev, PCI_COMMAND, &orig);
2953  pci_write_config_word(dev, PCI_COMMAND,
2954  orig ^ PCI_COMMAND_INTX_DISABLE);
2955  pci_read_config_word(dev, PCI_COMMAND, &new);
2956 
2957  /*
2958  * There's no way to protect against hardware bugs or detect them
2959  * reliably, but as long as we know what the value should be, let's
2960  * go ahead and check it.
2961  */
2962  if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2963  dev_err(&dev->dev, "Command register changed from "
2964  "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2965  } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2966  mask_supported = true;
2967  pci_write_config_word(dev, PCI_COMMAND, orig);
2968  }
2969 
2970  pci_cfg_access_unlock(dev);
2971  return mask_supported;
2972 }
2974 
2975 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2976 {
2977  struct pci_bus *bus = dev->bus;
2978  bool mask_updated = true;
2979  u32 cmd_status_dword;
2980  u16 origcmd, newcmd;
2981  unsigned long flags;
2982  bool irq_pending;
2983 
2984  /*
2985  * We do a single dword read to retrieve both command and status.
2986  * Document assumptions that make this possible.
2987  */
2988  BUILD_BUG_ON(PCI_COMMAND % 4);
2989  BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2990 
2991  raw_spin_lock_irqsave(&pci_lock, flags);
2992 
2993  bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2994 
2995  irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2996 
2997  /*
2998  * Check interrupt status register to see whether our device
2999  * triggered the interrupt (when masking) or the next IRQ is
3000  * already pending (when unmasking).
3001  */
3002  if (mask != irq_pending) {
3003  mask_updated = false;
3004  goto done;
3005  }
3006 
3007  origcmd = cmd_status_dword;
3008  newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3009  if (mask)
3010  newcmd |= PCI_COMMAND_INTX_DISABLE;
3011  if (newcmd != origcmd)
3012  bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3013 
3014 done:
3015  raw_spin_unlock_irqrestore(&pci_lock, flags);
3016 
3017  return mask_updated;
3018 }
3019 
3028 bool pci_check_and_mask_intx(struct pci_dev *dev)
3029 {
3030  return pci_check_and_set_intx_mask(dev, true);
3031 }
3033 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3042 bool pci_check_and_unmask_intx(struct pci_dev *dev)
3043 {
3044  return pci_check_and_set_intx_mask(dev, false);
3045 }
3047 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
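/*
 * Illustrative sketch (hypothetical): with an interrupt requested via
 * request_threaded_irq(), the hard handler uses pci_check_and_mask_intx()
 * to claim and mask the interrupt, and the thread (or user space) calls
 * pci_check_and_unmask_intx() once the device has been serviced.
 */
static irqreturn_t example_intx_hardirq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;		/* not our device */

	return IRQ_WAKE_THREAD;			/* thread re-enables INTx later */
}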
3056 void pci_msi_off(struct pci_dev *dev)
3057 {
3058  int pos;
3059  u16 control;
3060 
3061  pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3062  if (pos) {
3063  pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3064  control &= ~PCI_MSI_FLAGS_ENABLE;
3065  pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3066  }
3067  pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3068  if (pos) {
3069  pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3070  control &= ~PCI_MSIX_FLAGS_ENABLE;
3071  pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3072  }
3073 }
3075 EXPORT_SYMBOL_GPL(pci_msi_off);
3076 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3077 {
3078  return dma_set_max_seg_size(&dev->dev, size);
3079 }
3081 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3082 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3083 {
3084  return dma_set_seg_boundary(&dev->dev, mask);
3085 }
3087 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
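/*
 * Illustrative sketch (hypothetical): a device whose DMA engine handles at
 * most 64 KiB per segment and cannot cross a 4 GiB boundary would publish
 * those limits so the block layer and IOMMU code respect them when merging
 * scatterlist entries.
 */
static void example_set_dma_constraints(struct pci_dev *pdev)
{
	pci_set_dma_max_seg_size(pdev, 64 * 1024);
	pci_set_dma_seg_boundary(pdev, 0xffffffffUL);
}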
3088 static int pcie_flr(struct pci_dev *dev, int probe)
3089 {
3090  int i;
3091  u32 cap;
3092  u16 status;
3093 
3094  pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3095  if (!(cap & PCI_EXP_DEVCAP_FLR))
3096  return -ENOTTY;
3097 
3098  if (probe)
3099  return 0;
3100 
3101  /* Wait for the Transaction Pending bit to clear */
3102  for (i = 0; i < 4; i++) {
3103  if (i)
3104  msleep((1 << (i - 1)) * 100);
3105 
3106  pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
3107  if (!(status & PCI_EXP_DEVSTA_TRPND))
3108  goto clear;
3109  }
3110 
3111  dev_err(&dev->dev, "transaction is not cleared; "
3112  "proceeding with reset anyway\n");
3113 
3114 clear:
3115  pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3116 
3117  msleep(100);
3118 
3119  return 0;
3120 }
3121 
3122 static int pci_af_flr(struct pci_dev *dev, int probe)
3123 {
3124  int i;
3125  int pos;
3126  u8 cap;
3127  u8 status;
3128 
3129  pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3130  if (!pos)
3131  return -ENOTTY;
3132 
3133  pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3134  if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3135  return -ENOTTY;
3136 
3137  if (probe)
3138  return 0;
3139 
3140  /* Wait for the Transaction Pending bit to clear */
3141  for (i = 0; i < 4; i++) {
3142  if (i)
3143  msleep((1 << (i - 1)) * 100);
3144 
3145  pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3146  if (!(status & PCI_AF_STATUS_TP))
3147  goto clear;
3148  }
3149 
3150  dev_err(&dev->dev, "transaction is not cleared; "
3151  "proceeding with reset anyway\n");
3152 
3153 clear:
3154  pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3155  msleep(100);
3156 
3157  return 0;
3158 }
3159 
3175 static int pci_pm_reset(struct pci_dev *dev, int probe)
3176 {
3177  u16 csr;
3178 
3179  if (!dev->pm_cap)
3180  return -ENOTTY;
3181 
3182  pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3183  if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3184  return -ENOTTY;
3185 
3186  if (probe)
3187  return 0;
3188 
3189  if (dev->current_state != PCI_D0)
3190  return -EINVAL;
3191 
3192  csr &= ~PCI_PM_CTRL_STATE_MASK;
3193  csr |= PCI_D3hot;
3194  pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3195  pci_dev_d3_sleep(dev);
3196 
3197  csr &= ~PCI_PM_CTRL_STATE_MASK;
3198  csr |= PCI_D0;
3199  pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3200  pci_dev_d3_sleep(dev);
3201 
3202  return 0;
3203 }
3204 
3205 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3206 {
3207  u16 ctrl;
3208  struct pci_dev *pdev;
3209 
3210  if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3211  return -ENOTTY;
3212 
3213  list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3214  if (pdev != dev)
3215  return -ENOTTY;
3216 
3217  if (probe)
3218  return 0;
3219 
3220  pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3221  ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3222  pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3223  msleep(100);
3224 
3225  ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3226  pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3227  msleep(100);
3228 
3229  return 0;
3230 }
3231 
3232 static int __pci_dev_reset(struct pci_dev *dev, int probe)
3233 {
3234  int rc;
3235 
3236  might_sleep();
3237 
3238  rc = pci_dev_specific_reset(dev, probe);
3239  if (rc != -ENOTTY)
3240  goto done;
3241 
3242  rc = pcie_flr(dev, probe);
3243  if (rc != -ENOTTY)
3244  goto done;
3245 
3246  rc = pci_af_flr(dev, probe);
3247  if (rc != -ENOTTY)
3248  goto done;
3249 
3250  rc = pci_pm_reset(dev, probe);
3251  if (rc != -ENOTTY)
3252  goto done;
3253 
3254  rc = pci_parent_bus_reset(dev, probe);
3255 done:
3256  return rc;
3257 }
3258 
3259 static int pci_dev_reset(struct pci_dev *dev, int probe)
3260 {
3261  int rc;
3262 
3263  if (!probe) {
3264  pci_cfg_access_lock(dev);
3265  /* block PM suspend, driver probe, etc. */
3266  device_lock(&dev->dev);
3267  }
3268 
3269  rc = __pci_dev_reset(dev, probe);
3270 
3271  if (!probe) {
3272  device_unlock(&dev->dev);
3273  pci_cfg_access_unlock(dev);
3274  }
3275  return rc;
3276 }
3294 int __pci_reset_function(struct pci_dev *dev)
3295 {
3296  return pci_dev_reset(dev, 0);
3297 }
3299 EXPORT_SYMBOL_GPL(__pci_reset_function);
3319 int __pci_reset_function_locked(struct pci_dev *dev)
3320 {
3321  return __pci_dev_reset(dev, 0);
3322 }
3324 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3336 int pci_probe_reset_function(struct pci_dev *dev)
3337 {
3338  return pci_dev_reset(dev, 1);
3339 }
3340 
3357 int pci_reset_function(struct pci_dev *dev)
3358 {
3359  int rc;
3360 
3361  rc = pci_dev_reset(dev, 1);
3362  if (rc)
3363  return rc;
3364 
3365  pci_save_state(dev);
3366 
3367  /*
3368  * both INTx and MSI are disabled after the Interrupt Disable bit
3369  * is set and the Bus Master bit is cleared.
3370  */
3371  pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3372 
3373  rc = pci_dev_reset(dev, 0);
3374 
3375  pci_restore_state(dev);
3376 
3377  return rc;
3378 }
3380 EXPORT_SYMBOL_GPL(pci_reset_function);
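/*
 * Illustrative sketch (hypothetical): when a device stops responding, a
 * driver may attempt a function-level reset; pci_reset_function() saves and
 * restores config space around the reset, so only device-specific state has
 * to be rebuilt by the caller afterwards.
 */
static int example_recover_device(struct pci_dev *pdev)
{
	int rc = pci_reset_function(pdev);

	if (rc)
		dev_err(&pdev->dev, "function reset failed: %d\n", rc);

	return rc;
}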
3388 int pcix_get_max_mmrbc(struct pci_dev *dev)
3389 {
3390  int cap;
3391  u32 stat;
3392 
3393  cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3394  if (!cap)
3395  return -EINVAL;
3396 
3397  if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3398  return -EINVAL;
3399 
3400  return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3401 }
3403 EXPORT_SYMBOL(pcix_get_max_mmrbc);
3411 int pcix_get_mmrbc(struct pci_dev *dev)
3412 {
3413  int cap;
3414  u16 cmd;
3415 
3417  if (!cap)
3418  return -EINVAL;
3419 
3420  if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3421  return -EINVAL;
3422 
3423  return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3424 }
3426 EXPORT_SYMBOL(pcix_get_mmrbc);
3436 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3437 {
3438  int cap;
3439  u32 stat, v, o;
3440  u16 cmd;
3441 
3442  if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3443  return -EINVAL;
3444 
3445  v = ffs(mmrbc) - 10;
3446 
3448  if (!cap)
3449  return -EINVAL;
3450 
3451  if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3452  return -EINVAL;
3453 
3454  if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3455  return -E2BIG;
3456 
3457  if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3458  return -EINVAL;
3459 
3460  o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3461  if (o != v) {
3462  if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3463  return -EIO;
3464 
3465  cmd &= ~PCI_X_CMD_MAX_READ;
3466  cmd |= v << 2;
3467  if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3468  return -EIO;
3469  }
3470  return 0;
3471 }
3473 EXPORT_SYMBOL(pcix_set_mmrbc);
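/*
 * Illustrative sketch (hypothetical): a PCI-X driver can read the largest
 * memory read byte count the device supports and program it, treating a
 * failure (e.g. -EIO when the bridge forbids larger reads) as non-fatal.
 */
static void example_tune_mmrbc(struct pci_dev *pdev)
{
	int max = pcix_get_max_mmrbc(pdev);

	if (max > 0 && max > pcix_get_mmrbc(pdev))
		pcix_set_mmrbc(pdev, max);	/* best effort */
}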
3481 int pcie_get_readrq(struct pci_dev *dev)
3482 {
3483  u16 ctl;
3484 
3485  pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3486 
3487  return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3488 }
3490 EXPORT_SYMBOL(pcie_get_readrq);
3499 int pcie_set_readrq(struct pci_dev *dev, int rq)
3500 {
3501  u16 v;
3502 
3503  if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3504  return -EINVAL;
3505 
3506  /*
3507  * If using the "performance" PCIe config, we clamp the
3508  * read rq size to the max packet size to prevent the
3509  * host bridge generating requests larger than we can
3510  * cope with
3511  */
3512  if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3513  int mps = pcie_get_mps(dev);
3514 
3515  if (mps < 0)
3516  return mps;
3517  if (mps < rq)
3518  rq = mps;
3519  }
3520 
3521  v = (ffs(rq) - 8) << 12;
3522 
3523  return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3524  PCI_EXP_DEVCTL_READRQ, v);
3525 }
3527 EXPORT_SYMBOL(pcie_set_readrq);
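/*
 * Illustrative sketch (hypothetical): some drivers clamp the Max Read
 * Request Size, for example to work around device errata or to even out
 * bandwidth between functions; 512 bytes is a commonly chosen cap.
 */
static void example_cap_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) > 512)
		pcie_set_readrq(pdev, 512);
}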
3535 int pcie_get_mps(struct pci_dev *dev)
3536 {
3537  u16 ctl;
3538 
3539  pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
3540 
3541  return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3542 }
3543 
3552 int pcie_set_mps(struct pci_dev *dev, int mps)
3553 {
3554  u16 v;
3555 
3556  if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3557  return -EINVAL;
3558 
3559  v = ffs(mps) - 8;
3560  if (v > dev->pcie_mpss)
3561  return -EINVAL;
3562  v <<= 5;
3563 
3564  return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3565  PCI_EXP_DEVCTL_PAYLOAD, v);
3566 }
3567 
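/*
 * Both MPS and MRRS are encoded as a power-of-two exponent: a field value
 * of n means 128 << n bytes, so 256 bytes encodes to 1 and 4096 bytes to 5.
 * A minimal sketch of the same conversion (hypothetical helper, not part of
 * this file):
 */
static u16 example_encode_mps(int bytes)
{
	/* bytes must be a power of two in [128, 4096]; ffs(128) == 8 */
	return (ffs(bytes) - 8) << 5;	/* shifted into PCI_EXP_DEVCTL_PAYLOAD */
}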
3575 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3576 {
3577  int i, bars = 0;
3578  for (i = 0; i < PCI_NUM_RESOURCES; i++)
3579  if (pci_resource_flags(dev, i) & flags)
3580  bars |= (1 << i);
3581  return bars;
3582 }
3583 
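/*
 * Illustrative sketch (hypothetical): pci_select_bars() is usually paired
 * with pci_request_selected_regions() so a driver claims only the BARs of
 * the type it actually maps, e.g. all memory BARs.
 */
static int example_request_mem_bars(struct pci_dev *pdev, const char *name)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, name);
}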
3592 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3593 {
3594  int reg;
3595 
3596  if (resno < PCI_ROM_RESOURCE) {
3597  *type = pci_bar_unknown;
3598  return PCI_BASE_ADDRESS_0 + 4 * resno;
3599  } else if (resno == PCI_ROM_RESOURCE) {
3600  *type = pci_bar_mem32;
3601  return dev->rom_base_reg;
3602  } else if (resno < PCI_BRIDGE_RESOURCES) {
3603  /* device specific resource */
3604  reg = pci_iov_resource_bar(dev, resno, type);
3605  if (reg)
3606  return reg;
3607  }
3608 
3609  dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3610  return 0;
3611 }
3612 
3613 /* Some architectures require additional programming to enable VGA */
3614 static arch_set_vga_state_t arch_set_vga_state;
3615 
3616 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3617 {
3618  arch_set_vga_state = func; /* NULL disables */
3619 }
3620 
3621 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3622  unsigned int command_bits, u32 flags)
3623 {
3624  if (arch_set_vga_state)
3625  return arch_set_vga_state(dev, decode, command_bits,
3626  flags);
3627  return 0;
3628 }
3629 
3638 int pci_set_vga_state(struct pci_dev *dev, bool decode,
3639  unsigned int command_bits, u32 flags)
3640 {
3641  struct pci_bus *bus;
3642  struct pci_dev *bridge;
3643  u16 cmd;
3644  int rc;
3645 
3646  WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3647 
3648  /* ARCH specific VGA enables */
3649  rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3650  if (rc)
3651  return rc;
3652 
3653  if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3654  pci_read_config_word(dev, PCI_COMMAND, &cmd);
3655  if (decode == true)
3656  cmd |= command_bits;
3657  else
3658  cmd &= ~command_bits;
3659  pci_write_config_word(dev, PCI_COMMAND, cmd);
3660  }
3661 
3662  if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3663  return 0;
3664 
3665  bus = dev->bus;
3666  while (bus) {
3667  bridge = bus->self;
3668  if (bridge) {
3669  pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3670  &cmd);
3671  if (decode == true)
3672  cmd |= PCI_BRIDGE_CTL_VGA;
3673  else
3674  cmd &= ~PCI_BRIDGE_CTL_VGA;
3675  pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3676  cmd);
3677  }
3678  bus = bus->parent;
3679  }
3680  return 0;
3681 }
3682 
3683 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3684 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3685 static DEFINE_SPINLOCK(resource_alignment_lock);
3686 
3694 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3695 {
3696  int seg, bus, slot, func, align_order, count;
3697  resource_size_t align = 0;
3698  char *p;
3699 
3700  spin_lock(&resource_alignment_lock);
3701  p = resource_alignment_param;
3702  while (*p) {
3703  count = 0;
3704  if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3705  p[count] == '@') {
3706  p += count + 1;
3707  } else {
3708  align_order = -1;
3709  }
3710  if (sscanf(p, "%x:%x:%x.%x%n",
3711  &seg, &bus, &slot, &func, &count) != 4) {
3712  seg = 0;
3713  if (sscanf(p, "%x:%x.%x%n",
3714  &bus, &slot, &func, &count) != 3) {
3715  /* Invalid format */
3716  printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3717  p);
3718  break;
3719  }
3720  }
3721  p += count;
3722  if (seg == pci_domain_nr(dev->bus) &&
3723  bus == dev->bus->number &&
3724  slot == PCI_SLOT(dev->devfn) &&
3725  func == PCI_FUNC(dev->devfn)) {
3726  if (align_order == -1) {
3727  align = PAGE_SIZE;
3728  } else {
3729  align = 1 << align_order;
3730  }
3731  /* Found */
3732  break;
3733  }
3734  if (*p != ';' && *p != ',') {
3735  /* End of param or invalid format */
3736  break;
3737  }
3738  p++;
3739  }
3740  spin_unlock(&resource_alignment_lock);
3741  return align;
3742 }
3743 
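/*
 * The parser above accepts entries of the form
 * "[<order>@][<domain>:]<bus>:<slot>.<func>", separated by ';' or ','.
 * A constructed example of the boot parameter:
 *
 *   pci=resource_alignment=20@0000:00:1f.0;02:00.0
 *
 * requests 2^20-byte (1 MiB) alignment for 0000:00:1f.0 and the default
 * PAGE_SIZE alignment for 02:00.0 (no "<order>@" prefix given).
 */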
3751 int pci_is_reassigndev(struct pci_dev *dev)
3752 {
3753  return (pci_specified_resource_alignment(dev) != 0);
3754 }
3755 
3756 /*
3757  * This function disables memory decoding and releases memory resources
3758  * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
3759  * It also rounds up size to specified alignment.
3760  * Later on, the kernel will assign page-aligned memory resource back
3761  * to the device.
3762  */
3763 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3764 {
3765  int i;
3766  struct resource *r;
3767  resource_size_t align, size;
3768  u16 command;
3769 
3770  if (!pci_is_reassigndev(dev))
3771  return;
3772 
3773  if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3774  (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3775  dev_warn(&dev->dev,
3776  "Can't reassign resources to host bridge.\n");
3777  return;
3778  }
3779 
3780  dev_info(&dev->dev,
3781  "Disabling memory decoding and releasing memory resources.\n");
3782  pci_read_config_word(dev, PCI_COMMAND, &command);
3783  command &= ~PCI_COMMAND_MEMORY;
3784  pci_write_config_word(dev, PCI_COMMAND, command);
3785 
3786  align = pci_specified_resource_alignment(dev);
3787  for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3788  r = &dev->resource[i];
3789  if (!(r->flags & IORESOURCE_MEM))
3790  continue;
3791  size = resource_size(r);
3792  if (size < align) {
3793  size = align;
3794  dev_info(&dev->dev,
3795  "Rounding up size of resource #%d to %#llx.\n",
3796  i, (unsigned long long)size);
3797  }
3798  r->end = size - 1;
3799  r->start = 0;
3800  }
3801  /* Need to disable bridge's resource window,
3802  * to enable the kernel to reassign new resource
3803  * window later on.
3804  */
3805  if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3806  (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3807  for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3808  r = &dev->resource[i];
3809  if (!(r->flags & IORESOURCE_MEM))
3810  continue;
3811  r->end = resource_size(r) - 1;
3812  r->start = 0;
3813  }
3814  pci_disable_bridge_window(dev);
3815  }
3816 }
3817 
3818 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3819 {
3820  if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3821  count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3822  spin_lock(&resource_alignment_lock);
3823  strncpy(resource_alignment_param, buf, count);
3824  resource_alignment_param[count] = '\0';
3825  spin_unlock(&resource_alignment_lock);
3826  return count;
3827 }
3828 
3829 ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3830 {
3831  size_t count;
3832  spin_lock(&resource_alignment_lock);
3833  count = snprintf(buf, size, "%s", resource_alignment_param);
3834  spin_unlock(&resource_alignment_lock);
3835  return count;
3836 }
3837 
3838 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3839 {
3840  return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3841 }
3842 
3843 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3844  const char *buf, size_t count)
3845 {
3846  return pci_set_resource_alignment_param(buf, count);
3847 }
3848 
3849 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3850  pci_resource_alignment_store);
3851 
3852 static int __init pci_resource_alignment_sysfs_init(void)
3853 {
3854  return bus_create_file(&pci_bus_type,
3855  &bus_attr_resource_alignment);
3856 }
3857 
3858 late_initcall(pci_resource_alignment_sysfs_init);
3859 
3860 static void __devinit pci_no_domains(void)
3861 {
3862 #ifdef CONFIG_PCI_DOMAINS
3863  pci_domains_supported = 0;
3864 #endif
3865 }
3866 
3875 int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3876 {
3877  return 1;
3878 }
3879 
3880 void __weak pci_fixup_cardbus(struct pci_bus *bus)
3881 {
3882 }
3884 EXPORT_SYMBOL(pci_fixup_cardbus);
3885 static int __init pci_setup(char *str)
3886 {
3887  while (str) {
3888  char *k = strchr(str, ',');
3889  if (k)
3890  *k++ = 0;
3891  if (*str && (str = pcibios_setup(str)) && *str) {
3892  if (!strcmp(str, "nomsi")) {
3893  pci_no_msi();
3894  } else if (!strcmp(str, "noaer")) {
3895  pci_no_aer();
3896  } else if (!strncmp(str, "realloc=", 8)) {
3897  pci_realloc_get_opt(str + 8);
3898  } else if (!strncmp(str, "realloc", 7)) {
3899  pci_realloc_get_opt("on");
3900  } else if (!strcmp(str, "nodomains")) {
3901  pci_no_domains();
3902  } else if (!strncmp(str, "noari", 5)) {
3903  pcie_ari_disabled = true;
3904  } else if (!strncmp(str, "cbiosize=", 9)) {
3905  pci_cardbus_io_size = memparse(str + 9, &str);
3906  } else if (!strncmp(str, "cbmemsize=", 10)) {
3907  pci_cardbus_mem_size = memparse(str + 10, &str);
3908  } else if (!strncmp(str, "resource_alignment=", 19)) {
3909  pci_set_resource_alignment_param(str + 19,
3910  strlen(str + 19));
3911  } else if (!strncmp(str, "ecrc=", 5)) {
3912  pcie_ecrc_get_policy(str + 5);
3913  } else if (!strncmp(str, "hpiosize=", 9)) {
3914  pci_hotplug_io_size = memparse(str + 9, &str);
3915  } else if (!strncmp(str, "hpmemsize=", 10)) {
3916  pci_hotplug_mem_size = memparse(str + 10, &str);
3917  } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3918  pcie_bus_config = PCIE_BUS_TUNE_OFF;
3919  } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3920  pcie_bus_config = PCIE_BUS_SAFE;
3921  } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3922  pcie_bus_config = PCIE_BUS_PERFORMANCE;
3923  } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3924  pcie_bus_config = PCIE_BUS_PEER2PEER;
3925  } else if (!strncmp(str, "pcie_scan_all", 13)) {
3926  pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
3927  } else {
3928  printk(KERN_ERR "PCI: Unknown option `%s'\n",
3929  str);
3930  }
3931  }
3932  str = k;
3933  }
3934  return 0;
3935 }
3936 early_param("pci", pci_setup);
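/*
 * A constructed example combining several of the options handled above on
 * the kernel command line:
 *
 *   pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=12@01:00.0
 *
 * pci_setup() splits the argument on ',' and offers each token to
 * pcibios_setup() first, so architecture-specific options keep working;
 * only tokens the architecture code does not consume are matched here.
 */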
3937 
3965