Linux Kernel 3.7.1
amd_gart_64.c
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows using PCI devices that only support 32-bit addresses on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static dma_addr_t bad_dma_addr;

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it set the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

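/*
 * Editor's note, a hedged aside: this trade-off is selectable at boot
 * time through gart_parse_options() at the bottom of this file;
 * "iommu=fullflush" forces a GART TLB flush on every mapping, while
 * "iommu=nofullflush" enables the lazy strategy (flush only when the
 * allocator wraps around the aperture).
 */
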
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

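/*
 * Worked example (editor's illustration): the GART PTE is a single
 * 32-bit word, so a physical address below 1TB is folded into it.
 * For phys = 0x1234567000:
 *	phys & 0xfffff000	= 0x34567000	(bits 12-31 kept in place)
 *	(phys >> 32) << 4	= 0x120		(bits 32-39 stored in bits 4-11)
 *	GPTE_ENCODE(phys)	= 0x34567123	(plus VALID|COHERENT)
 * and GPTE_DECODE(0x34567123) = 0x34567000 | (0x120 << 28) = 0x1234567000.
 */
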
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
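
/*
 * Editor's note: the allocation above is next-fit. The search starts at
 * next_bit, just past the previous allocation; only when that fails does
 * it retry from bit 0, which means previously used aperture space is
 * about to be recycled and the GART TLB must be flushed before reuse.
 */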

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}
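
/*
 * Editor's illustration: for a device with a 32-bit DMA mask,
 * dma_capable() fails once addr + size crosses 4GB, e.g. a buffer at
 * physical 0x1100000000 must go through the aperture, while one at
 * 0x40000000 can be handed to the device untranslated (unless
 * force_iommu is set).
 */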

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return bad_dma_addr;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
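
/*
 * Editor's note: by the time of the return statement above, phys_mem has
 * been advanced in whole PAGE_SIZE steps, so its low bits
 * (phys_mem & ~PAGE_MASK) still hold the original intra-page offset; the
 * result is aperture base + remapped page + offset.
 */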

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}
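
/*
 * Editor's sketch (hypothetical, not part of the original file): how a
 * driver reaches gart_map_page() through the generic DMA API once
 * gart_dma_ops is installed. "my_pci_dev" and "buf" are stand-ins;
 * compiled out with #if 0.
 */
#if 0
static int example_streaming_dma(struct pci_dev *my_pci_dev, void *buf,
				 size_t len)
{
	dma_addr_t bus;

	/* dispatches through dma_ops->map_page, i.e. gart_map_page() */
	bus = dma_map_single(&my_pci_dev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&my_pci_dev->dev, bus)) /* gart_mapping_error() */
		return -ENOMEM;

	/* ... program the device with 'bus' and let it DMA ... */

	dma_unmap_single(&my_pci_dev->dev, bus, len, DMA_TO_DEVICE);
	return 0;
}
#endif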

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, NULL);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sg;
	sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, NULL);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
	return 0;
}
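
/*
 * Editor's sketch (hypothetical names): two 4KB chunks where the first
 * ends exactly on a page boundary and the second starts at offset 0 pass
 * the merge test above, so with iommu_merge set they come back as one
 * 8KB dma_length in the first scatterlist entry. A driver only sees the
 * usual API; compiled out with #if 0.
 */
#if 0
static int example_map_sg(struct pci_dev *my_pci_dev,
			  struct scatterlist *sglist, int nents)
{
	/* dispatches through dma_ops->map_sg, i.e. gart_map_sg() */
	int mapped = dma_map_sg(&my_pci_dev->dev, sglist, nents,
				DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* ... device DMA into the mapped entries ... */

	dma_unmap_sg(&my_pci_dev->dev, sglist, nents, DMA_FROM_DEVICE);
	return 0;
}
#endif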

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, struct dma_attrs *attrs)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_addr) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
						  attrs);

	return NULL;
}
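
/*
 * Editor's illustration: align_mask makes coherent buffers naturally
 * aligned in bus space. A 16KB request gives get_order(16384) = 2, so
 * align_mask = (1UL << 2) - 1 = 3 and the mapping starts on a 4-page
 * boundary in the aperture.
 */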

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, get_order(size));
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}
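
/*
 * Editor's illustration: with a 128MB aperture shared with AGP
 * (no_agp == 0) the IOMMU half is 64MB. The round_up() term then shaves
 * off the distance to the next 2MB (PMD_PAGE_SIZE) boundary, which is
 * zero for the usual power-of-two, naturally aligned apertures.
 * Anything under 64MB triggers the warning above.
 */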

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
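
/*
 * Editor's illustration: the CTL register carries the aperture order in
 * bits 1-3 and the BASE register the physical base in 32MB units.
 * E.g. aper_order = 2 gives 32MB << 2 = 128MB, and aper_base_32 = 0x40
 * gives 0x40 << 25 = 2GB. Apertures reaching above 4GB are rejected.
 */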

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
		   "falling back to iommu=soft.\n");
	return -1;
}
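
/*
 * Editor's illustration: one 32-bit GATT entry per 4KB aperture page,
 * so a 64MB aperture needs (64MB >> 12) * 4 = 64KB of GATT, allocated
 * above with __get_free_pages() and mapped uncacheable.
 */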

static struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mapping_error			= gart_mapping_error,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if there is AGP installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	bad_dma_addr		= iommu_bus_base;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus
	 * abort then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}
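
/*
 * Editor's sketch of the resulting aperture layout (derived from the
 * assignments above):
 *
 *	aper_base                              aper_base + aper_size
 *	|------- AGP part -------|==== IOMMU part (iommu_size) ====|
 *	                         ^ iommu_bus_base
 *	                         [first EMERGENCY_PAGES reserved;
 *	                          bad_dma_addr points here]
 */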

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
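
/*
 * Editor's illustration (hedged): this parser is fed the string after
 * "iommu=" on the kernel command line (see pci-dma.c), so typical uses
 * look like:
 *
 *	iommu=fullflush		flush the GART TLB on every mapping
 *	iommu=noaperture	do not touch the existing aperture
 *	iommu=memaper=2		force a fallback aperture of order 2
 *
 * A leading number (e.g. "iommu=33554432") sets iommu_size directly.
 */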