Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
bios32.c
Go to the documentation of this file.
1 /*
2  * linux/arch/arm/kernel/bios32.c
3  *
4  * PCI bios-type initialisation for PCI machines
5  *
6  * Bits taken from various places.
7  */
8 #include <linux/export.h>
9 #include <linux/kernel.h>
10 #include <linux/pci.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/io.h>
14 
15 #include <asm/mach-types.h>
16 #include <asm/mach/map.h>
17 #include <asm/mach/pci.h>
18 
/* Non-zero enables verbose PCI printks; set by the "debug" option in pcibios_setup(). */
static int debug_pci;
20 
21 /*
22  * We can't use pci_find_device() here since we are
23  * called from interrupt context.
24  */
25 static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, int warn)
26 {
27  struct pci_dev *dev;
28 
29  list_for_each_entry(dev, &bus->devices, bus_list) {
30  u16 status;
31 
32  /*
33  * ignore host bridge - we handle
34  * that separately
35  */
36  if (dev->bus->number == 0 && dev->devfn == 0)
37  continue;
38 
39  pci_read_config_word(dev, PCI_STATUS, &status);
40  if (status == 0xffff)
41  continue;
42 
43  if ((status & status_mask) == 0)
44  continue;
45 
46  /* clear the status errors */
47  pci_write_config_word(dev, PCI_STATUS, status & status_mask);
48 
49  if (warn)
50  printk("(%s: %04X) ", pci_name(dev), status);
51  }
52 
54  if (dev->subordinate)
55  pcibios_bus_report_status(dev->subordinate, status_mask, warn);
56 }
57 
58 void pcibios_report_status(u_int status_mask, int warn)
59 {
60  struct list_head *l;
61 
62  list_for_each(l, &pci_root_buses) {
63  struct pci_bus *bus = pci_bus_b(l);
64 
65  pcibios_bus_report_status(bus, status_mask, warn);
66  }
67 }
68 
69 /*
70  * We don't use this to fix the device, but initialisation of it.
71  * It's not the correct use for this, but it works.
72  * Note that the arbiter/ISA bridge appears to be buggy, specifically in
73  * the following area:
74  * 1. park on CPU
75  * 2. ISA bridge ping-pong
76  * 3. ISA bridge master handling of target RETRY
77  *
78  * Bug 3 is responsible for the sound DMA grinding to a halt. We now
79  * live with bug 2.
80  */
81 static void __devinit pci_fixup_83c553(struct pci_dev *dev)
82 {
83  /*
84  * Set memory region to start at address 0, and enable IO
85  */
86  pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_SPACE_MEMORY);
87  pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO);
88 
89  dev->resource[0].end -= dev->resource[0].start;
90  dev->resource[0].start = 0;
91 
92  /*
93  * All memory requests from ISA to be channelled to PCI
94  */
95  pci_write_config_byte(dev, 0x48, 0xff);
96 
97  /*
98  * Enable ping-pong on bus master to ISA bridge transactions.
99  * This improves the sound DMA substantially. The fixed
100  * priority arbiter also helps (see below).
101  */
102  pci_write_config_byte(dev, 0x42, 0x01);
103 
104  /*
105  * Enable PCI retry
106  */
107  pci_write_config_byte(dev, 0x40, 0x22);
108 
109  /*
110  * We used to set the arbiter to "park on last master" (bit
111  * 1 set), but unfortunately the CyberPro does not park the
112  * bus. We must therefore park on CPU. Unfortunately, this
113  * may trigger yet another bug in the 553.
114  */
115  pci_write_config_byte(dev, 0x83, 0x02);
116 
117  /*
118  * Make the ISA DMA request lowest priority, and disable
119  * rotating priorities completely.
120  */
121  pci_write_config_byte(dev, 0x80, 0x11);
122  pci_write_config_byte(dev, 0x81, 0x00);
123 
124  /*
125  * Route INTA input to IRQ 11, and set IRQ11 to be level
126  * sensitive.
127  */
128  pci_write_config_word(dev, 0x44, 0xb000);
129  outb(0x08, 0x4d1);
130 }
132 
133 static void __devinit pci_fixup_unassign(struct pci_dev *dev)
134 {
135  dev->resource[0].end -= dev->resource[0].start;
136  dev->resource[0].start = 0;
137 }
139 
140 /*
141  * Prevent the PCI layer from seeing the resources allocated to this device
142  * if it is the host bridge by marking it as such. These resources are of
143  * no consequence to the PCI layer (they are handled elsewhere).
144  */
145 static void __devinit pci_fixup_dec21285(struct pci_dev *dev)
146 {
147  int i;
148 
149  if (dev->devfn == 0) {
150  dev->class &= 0xff;
151  dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
152  for (i = 0; i < PCI_NUM_RESOURCES; i++) {
153  dev->resource[i].start = 0;
154  dev->resource[i].end = 0;
155  dev->resource[i].flags = 0;
156  }
157  }
158 }
160 
161 /*
162  * PCI IDE controllers use non-standard I/O port decoding, respect it.
163  */
164 static void __devinit pci_fixup_ide_bases(struct pci_dev *dev)
165 {
166  struct resource *r;
167  int i;
168 
169  if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
170  return;
171 
172  for (i = 0; i < PCI_NUM_RESOURCES; i++) {
173  r = dev->resource + i;
174  if ((r->start & ~0x80) == 0x374) {
175  r->start |= 2;
176  r->end = r->start;
177  }
178  }
179 }
180 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
181 
182 /*
183  * Put the DEC21142 to sleep
184  */
185 static void __devinit pci_fixup_dec21142(struct pci_dev *dev)
186 {
187  pci_write_config_dword(dev, 0x40, 0x80000000);
188 }
190 
191 /*
192  * The CY82C693 needs some rather major fixups to ensure that it does
193  * the right thing. Idea from the Alpha people, with a few additions.
194  *
195  * We ensure that the IDE base registers are set to 1f0/3f4 for the
196  * primary bus, and 170/374 for the secondary bus. Also, hide them
197  * from the PCI subsystem view as well so we won't try to perform
198  * our own auto-configuration on them.
199  *
200  * In addition, we ensure that the PCI IDE interrupts are routed to
201  * IRQ 14 and IRQ 15 respectively.
202  *
203  * The above gets us to a point where the IDE on this device is
204  * functional. However, The CY82C693U _does not work_ in bus
205  * master mode without locking the PCI bus solid.
206  */
207 static void __devinit pci_fixup_cy82c693(struct pci_dev *dev)
208 {
209  if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
210  u32 base0, base1;
211 
212  if (dev->class & 0x80) { /* primary */
213  base0 = 0x1f0;
214  base1 = 0x3f4;
215  } else { /* secondary */
216  base0 = 0x170;
217  base1 = 0x374;
218  }
219 
220  pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
221  base0 | PCI_BASE_ADDRESS_SPACE_IO);
222  pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
223  base1 | PCI_BASE_ADDRESS_SPACE_IO);
224 
225  dev->resource[0].start = 0;
226  dev->resource[0].end = 0;
227  dev->resource[0].flags = 0;
228 
229  dev->resource[1].start = 0;
230  dev->resource[1].end = 0;
231  dev->resource[1].flags = 0;
232  } else if (PCI_FUNC(dev->devfn) == 0) {
233  /*
234  * Setup IDE IRQ routing.
235  */
236  pci_write_config_byte(dev, 0x4b, 14);
237  pci_write_config_byte(dev, 0x4c, 15);
238 
239  /*
240  * Disable FREQACK handshake, enable USB.
241  */
242  pci_write_config_byte(dev, 0x4d, 0x41);
243 
244  /*
245  * Enable PCI retry, and PCI post-write buffer.
246  */
247  pci_write_config_byte(dev, 0x44, 0x17);
248 
249  /*
250  * Enable ISA master and DMA post write buffering.
251  */
252  pci_write_config_byte(dev, 0x45, 0x03);
253  }
254 }
256 
257 static void __devinit pci_fixup_it8152(struct pci_dev *dev)
258 {
259  int i;
260  /* fixup for ITE 8152 devices */
261  /* FIXME: add defines for class 0x68000 and 0x80103 */
262  if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST ||
263  dev->class == 0x68000 ||
264  dev->class == 0x80103) {
265  for (i = 0; i < PCI_NUM_RESOURCES; i++) {
266  dev->resource[i].start = 0;
267  dev->resource[i].end = 0;
268  dev->resource[i].flags = 0;
269  }
270  }
271 }
273 
274 /*
275  * If the bus contains any of these devices, then we must not turn on
276  * parity checking of any kind. Currently this is CyberPro 20x0 only.
277  */
278 static inline int pdev_bad_for_parity(struct pci_dev *dev)
279 {
280  return ((dev->vendor == PCI_VENDOR_ID_INTERG &&
282  dev->device == PCI_DEVICE_ID_INTERG_2010)) ||
283  (dev->vendor == PCI_VENDOR_ID_ITE &&
284  dev->device == PCI_DEVICE_ID_ITE_8152));
285 
286 }
287 
288 /*
289  * pcibios_fixup_bus - Called after each bus is probed,
290  * but before its children are examined.
291  */
292 void pcibios_fixup_bus(struct pci_bus *bus)
293 {
294  struct pci_dev *dev;
296 
297  /*
298  * Walk the devices on this bus, working out what we can
299  * and can't support.
300  */
301  list_for_each_entry(dev, &bus->devices, bus_list) {
302  u16 status;
303 
304  pci_read_config_word(dev, PCI_STATUS, &status);
305 
306  /*
307  * If any device on this bus does not support fast back
308  * to back transfers, then the bus as a whole is not able
309  * to support them. Having fast back to back transfers
310  * on saves us one PCI cycle per transaction.
311  */
312  if (!(status & PCI_STATUS_FAST_BACK))
313  features &= ~PCI_COMMAND_FAST_BACK;
314 
315  if (pdev_bad_for_parity(dev))
316  features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
317 
318  switch (dev->class >> 8) {
320  pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
323  pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
324  break;
325 
327  pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, &status);
329  pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, status);
330  break;
331  }
332  }
333 
334  /*
335  * Now walk the devices again, this time setting them up.
336  */
337  list_for_each_entry(dev, &bus->devices, bus_list) {
338  u16 cmd;
339 
340  pci_read_config_word(dev, PCI_COMMAND, &cmd);
341  cmd |= features;
342  pci_write_config_word(dev, PCI_COMMAND, cmd);
343 
344  pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
345  L1_CACHE_BYTES >> 2);
346  }
347 
348  /*
349  * Propagate the flags to the PCI bridge.
350  */
351  if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
352  if (features & PCI_COMMAND_FAST_BACK)
354  if (features & PCI_COMMAND_PARITY)
356  }
357 
358  /*
359  * Report what we did for this bus
360  */
361  printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
362  bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
363 }
#ifdef CONFIG_HOTPLUG
/* Hotplug drivers (e.g. cardbus) re-run the bus fixups from modules. */
EXPORT_SYMBOL(pcibios_fixup_bus);
#endif
367 
368 /*
369  * Swizzle the device pin each time we cross a bridge. If a platform does
370  * not provide a swizzle function, we perform the standard PCI swizzling.
371  *
372  * The default swizzling walks up the bus tree one level at a time, applying
373  * the standard swizzle function at each step, stopping when it finds the PCI
374  * root bus. This will return the slot number of the bridge device on the
375  * root bus and the interrupt pin on that device which should correspond
376  * with the downstream device interrupt.
377  *
378  * Platforms may override this, in which case the slot and pin returned
379  * depend entirely on the platform code. However, please note that the
380  * PCI standard swizzle is implemented on plug-in cards and Cardbus based
381  * PCI extenders, so it can not be ignored.
382  */
383 static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin)
384 {
385  struct pci_sys_data *sys = dev->sysdata;
386  int slot, oldpin = *pin;
387 
388  if (sys->swizzle)
389  slot = sys->swizzle(dev, pin);
390  else
391  slot = pci_common_swizzle(dev, pin);
392 
393  if (debug_pci)
394  printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
395  pci_name(dev), oldpin, *pin, slot);
396 
397  return slot;
398 }
399 
400 /*
401  * Map a slot/pin to an IRQ.
402  */
403 static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
404 {
405  struct pci_sys_data *sys = dev->sysdata;
406  int irq = -1;
407 
408  if (sys->map_irq)
409  irq = sys->map_irq(dev, slot, pin);
410 
411  if (debug_pci)
412  printk("PCI: %s mapping slot %d pin %d => irq %d\n",
413  pci_name(dev), slot, pin, irq);
414 
415  return irq;
416 }
417 
418 static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
419 {
420  int ret;
422 
423  if (list_empty(&sys->resources)) {
425  &iomem_resource, sys->mem_offset);
426  }
427 
428  list_for_each_entry(window, &sys->resources, list) {
429  if (resource_type(window->res) == IORESOURCE_IO)
430  return 0;
431  }
432 
433  sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io;
434  sys->io_res.end = (busnr + 1) * SZ_64K - 1;
435  sys->io_res.flags = IORESOURCE_IO;
436  sys->io_res.name = sys->io_res_name;
437  sprintf(sys->io_res_name, "PCI%d I/O", busnr);
438 
439  ret = request_resource(&ioport_resource, &sys->io_res);
440  if (ret) {
441  pr_err("PCI: unable to allocate I/O port region (%d)\n", ret);
442  return ret;
443  }
445  sys->io_offset);
446 
447  return 0;
448 }
449 
/*
 * Probe each controller described by @hw: allocate its pci_sys_data,
 * run the platform setup hook, scan the resulting root bus, and link
 * every successfully scanned system onto @head.
 */
static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
{
	struct pci_sys_data *sys = NULL;
	int ret;
	int nr, busnr;

	for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
		sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
		if (!sys)
			panic("PCI: unable to allocate sys data!");

#ifdef CONFIG_PCI_DOMAINS
		sys->domain = hw->domain;
#endif
		sys->busnr = busnr;
		sys->swizzle = hw->swizzle;
		sys->map_irq = hw->map_irq;
		INIT_LIST_HEAD(&sys->resources);

		/* setup() > 0: controller present; 0: skip; < 0: abort probing. */
		ret = hw->setup(nr, sys);

		if (ret > 0) {
			ret = pcibios_init_resources(nr, sys);
			if (ret) {
				kfree(sys);
				break;
			}

			/* The platform may provide its own scan routine. */
			if (hw->scan)
				sys->bus = hw->scan(nr, sys);
			else
				sys->bus = pci_scan_root_bus(NULL, sys->busnr,
						hw->ops, sys, &sys->resources);

			if (!sys->bus)
				panic("PCI: unable to scan bus!");

			/* Next controller's bus numbers follow this one's range. */
			busnr = sys->bus->busn_res.end + 1;

			list_add(&sys->node, head);
		} else {
			kfree(sys);
			if (ret < 0)
				break;
		}
	}
}
497 
498 void __init pci_common_init(struct hw_pci *hw)
499 {
500  struct pci_sys_data *sys;
501  LIST_HEAD(head);
502 
503  pci_add_flags(PCI_REASSIGN_ALL_RSRC);
504  if (hw->preinit)
505  hw->preinit();
506  pcibios_init_hw(hw, &head);
507  if (hw->postinit)
508  hw->postinit();
509 
510  pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
511 
512  list_for_each_entry(sys, &head, node) {
513  struct pci_bus *bus = sys->bus;
514 
515  if (!pci_has_flag(PCI_PROBE_ONLY)) {
516  /*
517  * Size the bridge windows.
518  */
520 
521  /*
522  * Assign resources.
523  */
525 
526  /*
527  * Enable bridges
528  */
529  pci_enable_bridges(bus);
530  }
531 
532  /*
533  * Tell drivers about devices found.
534  */
535  pci_bus_add_devices(bus);
536  }
537 }
538 
#ifndef CONFIG_PCI_HOST_ITE8152
/*
 * Nothing architecture-specific is needed to enable bus mastering here;
 * presumably the ITE8152 host (CONFIG_PCI_HOST_ITE8152) supplies its own
 * implementation — confirm in that driver.
 */
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
#endif
545 
546 char * __init pcibios_setup(char *str)
547 {
548  if (!strcmp(str, "debug")) {
549  debug_pci = 1;
550  return NULL;
551  } else if (!strcmp(str, "firmware")) {
552  pci_add_flags(PCI_PROBE_ONLY);
553  return NULL;
554  }
555  return str;
556 }
557 
558 /*
559  * From arch/i386/kernel/pci-i386.c:
560  *
561  * We need to avoid collisions with `mirrored' VGA ports
562  * and other strange ISA hardware, so we always want the
563  * addresses to be allocated in the 0x000-0x0ff region
564  * modulo 0x400.
565  *
566  * Why? Because some silly external IO cards only decode
567  * the low 10 bits of the IO address. The 0x00-0xff region
568  * is reserved for motherboard devices that decode all 16
569  * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
570  * but we want to try to avoid allocating at 0x2900-0x2bff
571  * which might be mirrored at 0x0100-0x03ff..
572  */
575 {
576  resource_size_t start = res->start;
577 
578  if (res->flags & IORESOURCE_IO && start & 0x300)
579  start = (start + 0x3ff) & ~0x3ff;
580 
581  start = (start + align - 1) & ~(align - 1);
582 
583  return start;
584 }
585 
590 int pcibios_enable_device(struct pci_dev *dev, int mask)
591 {
592  u16 cmd, old_cmd;
593  int idx;
594  struct resource *r;
595 
596  pci_read_config_word(dev, PCI_COMMAND, &cmd);
597  old_cmd = cmd;
598  for (idx = 0; idx < 6; idx++) {
599  /* Only set up the requested stuff */
600  if (!(mask & (1 << idx)))
601  continue;
602 
603  r = dev->resource + idx;
604  if (!r->start && r->end) {
605  printk(KERN_ERR "PCI: Device %s not available because"
606  " of resource collisions\n", pci_name(dev));
607  return -EINVAL;
608  }
609  if (r->flags & IORESOURCE_IO)
610  cmd |= PCI_COMMAND_IO;
611  if (r->flags & IORESOURCE_MEM)
612  cmd |= PCI_COMMAND_MEMORY;
613  }
614 
615  /*
616  * Bridges (eg, cardbus bridges) need to be fully enabled
617  */
618  if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
620 
621  if (cmd != old_cmd) {
622  printk("PCI: enabling device %s (%04x -> %04x)\n",
623  pci_name(dev), old_cmd, cmd);
624  pci_write_config_word(dev, PCI_COMMAND, cmd);
625  }
626  return 0;
627 }
628 
629 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
630  enum pci_mmap_state mmap_state, int write_combine)
631 {
632  struct pci_sys_data *root = dev->sysdata;
633  unsigned long phys;
634 
635  if (mmap_state == pci_mmap_io) {
636  return -EINVAL;
637  } else {
638  phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
639  }
640 
641  /*
642  * Mark this as IO
643  */
645 
646  if (remap_pfn_range(vma, vma->vm_start, phys,
647  vma->vm_end - vma->vm_start,
648  vma->vm_page_prot))
649  return -EAGAIN;
650 
651  return 0;
652 }
653 
654 void __init pci_map_io_early(unsigned long pfn)
655 {
656  struct map_desc pci_io_desc = {
657  .virtual = PCI_IO_VIRT_BASE,
658  .type = MT_DEVICE,
659  .length = SZ_64K,
660  };
661 
662  pci_io_desc.pfn = pfn;
663  iotable_init(&pci_io_desc, 1);
664 }