/*
 * Source: Linux kernel 3.7.1, arch/x86/pci/i386.c
 * (recovered from a Doxygen-generated listing)
 */
1 /*
2  * Low-Level PCI Access for i386 machines
3  *
4  * Copyright 1993, 1994 Drew Eckhardt
5  * Visionary Computing
6  * (Unix and Linux consulting and custom programming)
8  * +1 (303) 786-7975
9  *
10  * Drew's work was sponsored by:
11  * iX Multiuser Multitasking Magazine
12  * Hannover, Germany
14  *
15  * Copyright 1997--2000 Martin Mares <[email protected]>
16  *
17  * For more information, please consult the following manuals (look at
18  * http://www.pcisig.com/ for how to get them):
19  *
20  * PCI BIOS Specification
21  * PCI Local Bus Specification
22  * PCI to PCI Bridge Specification
23  * PCI System Design Guide
24  *
25  */
26 
27 #include <linux/types.h>
28 #include <linux/kernel.h>
29 #include <linux/export.h>
30 #include <linux/pci.h>
31 #include <linux/init.h>
32 #include <linux/ioport.h>
33 #include <linux/errno.h>
34 #include <linux/bootmem.h>
35 
36 #include <asm/pat.h>
37 #include <asm/e820.h>
38 #include <asm/pci_x86.h>
39 #include <asm/io_apic.h>
40 
41 
42 /*
43  * This list of dynamic mappings is for temporarily maintaining
44  * original BIOS BAR addresses for possible reinstatement.
45  */
47  struct list_head list;
48  struct pci_dev *dev;
50 };
51 
/* All saved BIOS BAR mappings, protected by pcibios_fwaddrmap_lock. */
static LIST_HEAD(pcibios_fwaddrmappings);
static DEFINE_SPINLOCK(pcibios_fwaddrmap_lock);
54 
55 /* Must be called with 'pcibios_fwaddrmap_lock' lock held. */
56 static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
57 {
58  struct pcibios_fwaddrmap *map;
59 
60  WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
61 
62  list_for_each_entry(map, &pcibios_fwaddrmappings, list)
63  if (map->dev == dev)
64  return map;
65 
66  return NULL;
67 }
68 
/*
 * Remember the firmware-assigned address of BAR @idx of @dev so that it
 * can be reinstated later if reassignment fails.
 *
 * The entry is allocated with GFP_KERNEL (may sleep), so the spinlock is
 * dropped around the allocation and re-taken before the list insertion.
 * NOTE(review): another caller could insert an entry for the same device
 * inside that unlock window, producing a duplicate list entry; presumably
 * callers are serialized during boot-time resource survey — confirm.
 * Allocation failure is silently tolerated: the address is simply not saved.
 */
static void
pcibios_save_fw_addr(struct pci_dev *dev, int idx, resource_size_t fw_addr)
{
	unsigned long flags;
	struct pcibios_fwaddrmap *map;

	spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
	map = pcibios_fwaddrmap_lookup(dev);
	if (!map) {
		/* Drop the lock: kzalloc(GFP_KERNEL) may sleep. */
		spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map)
			return;

		map->dev = pci_dev_get(dev);	/* hold a device ref while mapped */
		map->fw_addr[idx] = fw_addr;
		INIT_LIST_HEAD(&map->list);

		spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
		list_add_tail(&map->list, &pcibios_fwaddrmappings);
	} else
		map->fw_addr[idx] = fw_addr;
	spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
}
93 
95 {
96  unsigned long flags;
97  struct pcibios_fwaddrmap *map;
98  resource_size_t fw_addr = 0;
99 
100  spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
101  map = pcibios_fwaddrmap_lookup(dev);
102  if (map)
103  fw_addr = map->fw_addr[idx];
104  spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
105 
106  return fw_addr;
107 }
108 
109 static void pcibios_fw_addr_list_del(void)
110 {
111  unsigned long flags;
112  struct pcibios_fwaddrmap *entry, *next;
113 
114  spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
115  list_for_each_entry_safe(entry, next, &pcibios_fwaddrmappings, list) {
116  list_del(&entry->list);
117  pci_dev_put(entry->dev);
118  kfree(entry);
119  }
120  spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
121 }
122 
123 static int
124 skip_isa_ioresource_align(struct pci_dev *dev) {
125 
127  !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
128  return 1;
129  return 0;
130 }
131 
132 /*
133  * We need to avoid collisions with `mirrored' VGA ports
134  * and other strange ISA hardware, so we always want the
135  * addresses to be allocated in the 0x000-0x0ff region
136  * modulo 0x400.
137  *
138  * Why? Because some silly external IO cards only decode
139  * the low 10 bits of the IO address. The 0x00-0xff region
140  * is reserved for motherboard devices that decode all 16
141  * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
142  * but we want to try to avoid allocating at 0x2900-0x2bff
143  * which might have be mirrored at 0x0100-0x03ff..
144  */
148 {
149  struct pci_dev *dev = data;
150  resource_size_t start = res->start;
151 
152  if (res->flags & IORESOURCE_IO) {
153  if (skip_isa_ioresource_align(dev))
154  return start;
155  if (start & 0x300)
156  start = (start + 0x3ff) & ~0x3ff;
157  }
158  return start;
159 }
161 
162 /*
163  * Handle resources of PCI devices. If the world were perfect, we could
164  * just allocate all the resource regions and do nothing more. It isn't.
165  * On the other hand, we cannot just re-allocate all devices, as it would
166  * require us to know lots of host bridge internals. So we attempt to
167  * keep as much of the original configuration as possible, but tweak it
168  * when it's found to be wrong.
169  *
170  * Known BIOS problems we have to work around:
171  * - I/O or memory regions not configured
172  * - regions configured, but not enabled in the command register
173  * - bogus I/O addresses above 64K used
174  * - expansion ROMs left enabled (this may sound harmless, but given
175  * the fact the PCI specs explicitly allow address decoders to be
176  * shared between expansion ROMs and other resource regions, it's
177  * at least dangerous)
178  * - bad resource sizes or overlaps with other regions
179  *
180  * Our solution:
181  * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
182  * This gives us fixed barriers on where we can allocate.
183  * (2) Allocate resources for all enabled devices. If there is
184  * a collision, just mark the resource as unallocated. Also
185  * disable expansion ROMs during this step.
186  * (3) Try to allocate resources for disabled devices. If the
187  * resources were assigned correctly, everything goes well,
188  * if they weren't, they won't disturb allocation of other
189  * resources.
190  * (4) Assign new addresses to resources which were either
191  * not configured at all or misconfigured. If explicitly
192  * requested by the user, configure expansion ROM address
193  * as well.
194  */
195 
197 {
198  struct pci_bus *bus;
199  struct pci_dev *dev;
200  int idx;
201  struct resource *r;
202 
203  /* Depth-First Search on bus tree */
204  list_for_each_entry(bus, bus_list, node) {
205  if ((dev = bus->self)) {
206  for (idx = PCI_BRIDGE_RESOURCES;
207  idx < PCI_NUM_RESOURCES; idx++) {
208  r = &dev->resource[idx];
209  if (!r->flags)
210  continue;
211  if (!r->start ||
212  pci_claim_resource(dev, idx) < 0) {
213  /*
214  * Something is wrong with the region.
215  * Invalidate the resource to prevent
216  * child resource allocations in this
217  * range.
218  */
219  r->start = r->end = 0;
220  r->flags = 0;
221  }
222  }
223  }
225  }
226 }
227 
/*
 * Inclusive range of resource indices to scan in
 * pcibios_allocate_resources().  The struct header line was lost in
 * extraction; restored here.
 */
struct pci_check_idx_range {
	int start;
	int end;
};
232 
233 static void __init pcibios_allocate_resources(int pass)
234 {
235  struct pci_dev *dev = NULL;
236  int idx, disabled, i;
237  u16 command;
238  struct resource *r;
239 
240  struct pci_check_idx_range idx_range[] = {
242 #ifdef CONFIG_PCI_IOV
243  { PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
244 #endif
245  };
246 
247  for_each_pci_dev(dev) {
248  pci_read_config_word(dev, PCI_COMMAND, &command);
249  for (i = 0; i < ARRAY_SIZE(idx_range); i++)
250  for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
251  r = &dev->resource[idx];
252  if (r->parent) /* Already allocated */
253  continue;
254  if (!r->start) /* Address not assigned at all */
255  continue;
256  if (r->flags & IORESOURCE_IO)
257  disabled = !(command & PCI_COMMAND_IO);
258  else
259  disabled = !(command & PCI_COMMAND_MEMORY);
260  if (pass == disabled) {
261  dev_dbg(&dev->dev,
262  "BAR %d: reserving %pr (d=%d, p=%d)\n",
263  idx, r, disabled, pass);
264  if (pci_claim_resource(dev, idx) < 0) {
265  /* We'll assign a new address later */
266  pcibios_save_fw_addr(dev,
267  idx, r->start);
268  r->end -= r->start;
269  r->start = 0;
270  }
271  }
272  }
273  if (!pass) {
274  r = &dev->resource[PCI_ROM_RESOURCE];
275  if (r->flags & IORESOURCE_ROM_ENABLE) {
276  /* Turn the ROM off, leave the resource region,
277  * but keep it unregistered. */
278  u32 reg;
279  dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
281  pci_read_config_dword(dev,
282  dev->rom_base_reg, &reg);
283  pci_write_config_dword(dev, dev->rom_base_reg,
284  reg & ~PCI_ROM_ADDRESS_ENABLE);
285  }
286  }
287  }
288 }
289 
290 static int __init pcibios_assign_resources(void)
291 {
292  struct pci_dev *dev = NULL;
293  struct resource *r;
294 
295  if (!(pci_probe & PCI_ASSIGN_ROMS)) {
296  /*
297  * Try to use BIOS settings for ROMs, otherwise let
298  * pci_assign_unassigned_resources() allocate the new
299  * addresses.
300  */
301  for_each_pci_dev(dev) {
302  r = &dev->resource[PCI_ROM_RESOURCE];
303  if (!r->flags || !r->start)
304  continue;
305  if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
306  r->end -= r->start;
307  r->start = 0;
308  }
309  }
310  }
311 
313  pcibios_fw_addr_list_del();
314 
315  return 0;
316 }
317 
319 {
320  DBG("PCI: Allocating resources\n");
321  pcibios_allocate_bus_resources(&pci_root_buses);
322  pcibios_allocate_resources(0);
323  pcibios_allocate_resources(1);
324 
326  /*
327  * Insert the IO APIC resources after PCI initialization has
328  * occurred to handle IO APICS that are mapped in on a BAR in
329  * PCI space, but before trying to assign unassigned pci res.
330  */
332 }
333 
/*
 * Run at fs_initcall time (one level below subsys_initcall) so the
 * motherboard gets a chance to reserve its resources first.
 */
fs_initcall(pcibios_assign_resources);
339 
/* vm_ops for PCI mmaps: allow ptrace/access_process_vm style access to
 * the mapped physical range via generic_access_phys. */
static const struct vm_operations_struct pci_mmap_ops = {
	.access = generic_access_phys,
};
343 
/*
 * mmap a PCI resource into userspace for @dev.
 *
 * @mmap_state:     only pci_mmap_mem is supported; pci_mmap_io returns
 *                  -EINVAL since I/O space is not accessible via loads/
 *                  stores on this platform.
 * @write_combine:  request a WC mapping; requires PAT (see below).
 *
 * Returns 0 on success, -EINVAL on unsupported request, -EAGAIN if the
 * PFN-range remap fails.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long prot;

	/* I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	prot = pgprot_val(vma->vm_page_prot);

	/*
	 * Return error if pat is not enabled and write_combine is requested.
	 * Caller can followup with UC MINUS request and add a WC mtrr if there
	 * is a free mtrr slot.
	 */
	if (!pat_enabled && write_combine)
		return -EINVAL;

	/* With PAT: honour WC when asked; otherwise (and on any CPU newer
	 * than a 386) fall back to UC-: the default ioremap() attribute,
	 * chosen to avoid conflicting aliases of the same physical range. */
	if (pat_enabled && write_combine)
		prot |= _PAGE_CACHE_WC;
	else if (pat_enabled || boot_cpu_data.x86 > 3)
		/*
		 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
		 * To avoid attribute conflicts, request UC MINUS here
		 * as well.
		 */
		prot |= _PAGE_CACHE_UC_MINUS;

	prot |= _PAGE_IOMAP;	/* creating a mapping for IO */

	vma->vm_page_prot = __pgprot(prot);

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot))
		return -EAGAIN;

	/* Install access hook so the range stays reachable for ptrace. */
	vma->vm_ops = &pci_mmap_ops;

	return 0;
}