Linux Kernel 3.7.1
intel-gtt.c
1 /*
2  * Intel GTT (Graphics Translation Table) routines
3  *
4  * Caveat: This driver implements the linux agp interface, but this is far from
5  * an agp driver! GTT support ended up here for purely historical reasons: The
6  * old userspace intel graphics drivers needed an interface to map memory into
7  * the GTT. And the drm provides a default interface for graphics devices sitting
8  * on an agp port. So it made sense to fake the GTT support as an agp port to
9  * avoid having to create a new api.
10  *
11  * With gem this does not make much sense anymore, just needlessly complicates
12  * the code. But as long as the old graphics stack is still supported, it's stuck
13  * here.
14  *
15  * /fairy-tale-mode off
16  */
17 
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/pagemap.h>
23 #include <linux/agp_backend.h>
24 #include <linux/delay.h>
25 #include <asm/smp.h>
26 #include "agp.h"
27 #include "intel-agp.h"
28 #include <drm/intel-gtt.h>
29 
30 /*
31  * If we have Intel graphics, we're not going to have anything other than
32  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
33  * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
34  * Only newer chipsets need to bother with this, of course.
35  */
36 #ifdef CONFIG_INTEL_IOMMU
37 #define USE_PCI_DMA_API 1
38 #else
39 #define USE_PCI_DMA_API 0
40 #endif
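/* With USE_PCI_DMA_API the insert paths map pages through pci_map_sg() and
 * program the returned DMA addresses into the PTEs; otherwise the raw
 * page_to_phys() addresses are used directly. */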
41 
42 struct intel_gtt_driver {
43  unsigned int gen : 8;
44  unsigned int is_g33 : 1;
45  unsigned int is_pineview : 1;
46  unsigned int is_ironlake : 1;
47  unsigned int has_pgtbl_enable : 1;
48  unsigned int dma_mask_size : 8;
49  /* Chipset specific GTT setup */
50  int (*setup)(void);
51  /* This should undo anything done in ->setup() save the unmapping
52  * of the mmio register file, that's done in the generic code. */
53  void (*cleanup)(void);
54  void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
55  /* Flags is a more or less chipset specific opaque value.
56  * For chipsets that need to support old ums (non-gem) code, this
57  * needs to be identical to the various supported agp memory types! */
58  bool (*check_flags)(unsigned int flags);
59  void (*chipset_flush)(void);
60 };
61 
62 static struct _intel_private {
63  struct intel_gtt base;
64  const struct intel_gtt_driver *driver;
65  struct pci_dev *pcidev; /* device one */
66  struct pci_dev *bridge_dev;
67  u8 __iomem *registers;
68  phys_addr_t gtt_bus_addr;
69  u32 PGETBL_save;
70  u32 __iomem *gtt; /* I915G */
71  bool clear_fake_agp; /* on first access via agp, fill with scratch */
72  int num_dcache_entries;
73  void __iomem *i9xx_flush_page;
74  char *i81x_gtt_table;
75  struct resource ifp_resource;
76  int resource_valid;
77  struct page *scratch_page;
78  int refcount;
79 } intel_private;
80 
81 #define INTEL_GTT_GEN intel_private.driver->gen
82 #define IS_G33 intel_private.driver->is_g33
83 #define IS_PINEVIEW intel_private.driver->is_pineview
84 #define IS_IRONLAKE intel_private.driver->is_ironlake
85 #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
86 
87 static int intel_gtt_map_memory(struct page **pages,
88  unsigned int num_entries,
89  struct sg_table *st)
90 {
91  struct scatterlist *sg;
92  int i;
93 
94  DBG("try mapping %lu pages\n", (unsigned long)num_entries);
95 
96  if (sg_alloc_table(st, num_entries, GFP_KERNEL))
97  goto err;
98 
99  for_each_sg(st->sgl, sg, num_entries, i)
100  sg_set_page(sg, pages[i], PAGE_SIZE, 0);
101 
102  if (!pci_map_sg(intel_private.pcidev,
103  st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
104  goto err;
105 
106  return 0;
107 
108 err:
109  sg_free_table(st);
110  return -ENOMEM;
111 }
112 
113 static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
114 {
115  struct sg_table st;
116  DBG("try unmapping %lu pages\n", (unsigned long)num_sg);
117 
118  pci_unmap_sg(intel_private.pcidev, sg_list,
119  num_sg, PCI_DMA_BIDIRECTIONAL);
120 
121  st.sgl = sg_list;
122  st.orig_nents = st.nents = num_sg;
123 
124  sg_free_table(&st);
125 }
126 
127 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
128 {
129  return;
130 }
131 
132 /* Exists to support ARGB cursors */
133 static struct page *i8xx_alloc_pages(void)
134 {
135  struct page *page;
136 
137  page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
138  if (page == NULL)
139  return NULL;
140 
141  if (set_pages_uc(page, 4) < 0) {
142  set_pages_wb(page, 4);
143  __free_pages(page, 2);
144  return NULL;
145  }
146  get_page(page);
147  atomic_inc(&agp_bridge->current_memory_agp);
148  return page;
149 }
150 
151 static void i8xx_destroy_pages(struct page *page)
152 {
153  if (page == NULL)
154  return;
155 
156  set_pages_wb(page, 4);
157  put_page(page);
158  __free_pages(page, 2);
159  atomic_dec(&agp_bridge->current_memory_agp);
160 }
161 
162 #define I810_GTT_ORDER 4
163 static int i810_setup(void)
164 {
165  u32 reg_addr;
166  char *gtt_table;
167 
168  /* i81x does not preallocate the gtt. It's always 64kb in size. */
169  gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
170  if (gtt_table == NULL)
171  return -ENOMEM;
172  intel_private.i81x_gtt_table = gtt_table;
173 
174  pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
175  reg_addr &= 0xfff80000;
176 
177  intel_private.registers = ioremap(reg_addr, KB(64));
178  if (!intel_private.registers)
179  return -ENOMEM;
180 
180 
181  writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
182  intel_private.registers+I810_PGETBL_CTL);
183 
184  intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
185 
186  if ((readl(intel_private.registers+I810_DRAM_CTL)
187  & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
188  dev_info(&intel_private.pcidev->dev,
189  "detected 4MB dedicated video ram\n");
190  intel_private.num_dcache_entries = 1024;
191  }
192 
193  return 0;
194 }
195 
196 static void i810_cleanup(void)
197 {
198  writel(0, intel_private.registers+I810_PGETBL_CTL);
199  free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
200 }
201 
202 static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
203  int type)
204 {
205  int i;
206 
207  if ((pg_start + mem->page_count)
208  > intel_private.num_dcache_entries)
209  return -EINVAL;
210 
211  if (!mem->is_flushed)
212  global_cache_flush();
213 
214  for (i = pg_start; i < (pg_start + mem->page_count); i++) {
215  dma_addr_t addr = i << PAGE_SHIFT;
216  intel_private.driver->write_entry(addr,
217  i, type);
218  }
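 /* PTE writes are posted; the readl below flushes them to the GTT */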
219  readl(intel_private.gtt+i-1);
220 
221  return 0;
222 }
223 
224 /*
225  * The i810/i830 requires a physical address to program its mouse
226  * pointer into hardware.
227  * However the Xserver still writes to it through the agp aperture.
228  */
229 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
230 {
231  struct agp_memory *new;
232  struct page *page;
233 
234  switch (pg_count) {
235  case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
236  break;
237  case 4:
238  /* kludge to get 4 physical pages for ARGB cursor */
239  page = i8xx_alloc_pages();
240  break;
241  default:
242  return NULL;
243  }
244 
245  if (page == NULL)
246  return NULL;
247 
248  new = agp_create_memory(pg_count);
249  if (new == NULL)
250  return NULL;
251 
252  new->pages[0] = page;
253  if (pg_count == 4) {
254  /* kludge to get 4 physical pages for ARGB cursor */
255  new->pages[1] = new->pages[0] + 1;
256  new->pages[2] = new->pages[1] + 1;
257  new->pages[3] = new->pages[2] + 1;
258  }
259  new->page_count = pg_count;
260  new->num_scratch_pages = pg_count;
261  new->type = AGP_PHYS_MEMORY;
262  new->physical = page_to_phys(new->pages[0]);
263  return new;
264 }
265 
266 static void intel_i810_free_by_type(struct agp_memory *curr)
267 {
268  agp_free_key(curr->key);
269  if (curr->type == AGP_PHYS_MEMORY) {
270  if (curr->page_count == 4)
271  i8xx_destroy_pages(curr->pages[0]);
272  else {
273  agp_bridge->driver->agp_destroy_page(curr->pages[0],
274  AGP_PAGE_DESTROY_UNMAP);
275  agp_bridge->driver->agp_destroy_page(curr->pages[0],
276  AGP_PAGE_DESTROY_FREE);
277  }
278  agp_free_page_array(curr);
279  }
280  kfree(curr);
281 }
282 
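/* The scratch page backs unbound GTT entries (see intel_gtt_clear_range), so
 * stray GPU accesses hit a harmless page instead of random memory. */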
283 static int intel_gtt_setup_scratch_page(void)
284 {
285  struct page *page;
286  dma_addr_t dma_addr;
287 
288  page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
289  if (page == NULL)
290  return -ENOMEM;
291  get_page(page);
292  set_pages_uc(page, 1);
293 
294  if (intel_private.base.needs_dmar) {
295  dma_addr = pci_map_page(intel_private.pcidev, page, 0,
296  PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
297  if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
298  return -EINVAL;
299 
300  intel_private.base.scratch_page_dma = dma_addr;
301  } else
302  intel_private.base.scratch_page_dma = page_to_phys(page);
303 
304  intel_private.scratch_page = page;
305 
306  return 0;
307 }
308 
309 static void i810_write_entry(dma_addr_t addr, unsigned int entry,
310  unsigned int flags)
311 {
312  u32 pte_flags = I810_PTE_VALID;
313 
314  switch (flags) {
315  case AGP_DCACHE_MEMORY:
316  pte_flags |= I810_PTE_LOCAL;
317  break;
318  case AGP_USER_CACHED_MEMORY:
319  pte_flags |= I830_PTE_SYSTEM_CACHED;
320  break;
321  }
322 
323  writel(addr | pte_flags, intel_private.gtt + entry);
324 }
325 
326 static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
327  {32, 8192, 3},
328  {64, 16384, 4},
329  {128, 32768, 5},
330  {256, 65536, 6},
331  {512, 131072, 7},
332 };
333 
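/* "stolen" memory is main memory the BIOS has set aside for the IGD; the
 * GMCH control register encodes how much was taken. */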
334 static unsigned int intel_gtt_stolen_size(void)
335 {
336  u16 gmch_ctrl;
337  u8 rdct;
338  int local = 0;
339  static const int ddt[4] = { 0, 16, 32, 64 };
340  unsigned int stolen_size = 0;
341 
342  if (INTEL_GTT_GEN == 1)
343  return 0; /* no stolen mem on i81x */
344 
345  pci_read_config_word(intel_private.bridge_dev,
346  I830_GMCH_CTRL, &gmch_ctrl);
347 
348  if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
349  intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
350  switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
351  case I830_GMCH_GMS_STOLEN_512:
352  stolen_size = KB(512);
353  break;
354  case I830_GMCH_GMS_STOLEN_1024:
355  stolen_size = MB(1);
356  break;
357  case I830_GMCH_GMS_STOLEN_8192:
358  stolen_size = MB(8);
359  break;
360  case I830_GMCH_GMS_LOCAL:
361  rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
362  stolen_size = (I830_RDRAM_ND(rdct) + 1) *
363  MB(ddt[I830_RDRAM_DDT(rdct)]);
364  local = 1;
365  break;
366  default:
367  stolen_size = 0;
368  break;
369  }
370  } else if (INTEL_GTT_GEN == 6) {
371  /*
372  * SandyBridge has new memory control reg at 0x50.w
373  */
374  u16 snb_gmch_ctl;
375  pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
376  switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
377  case SNB_GMCH_GMS_STOLEN_32M:
378  stolen_size = MB(32);
379  break;
380  case SNB_GMCH_GMS_STOLEN_64M:
381  stolen_size = MB(64);
382  break;
383  case SNB_GMCH_GMS_STOLEN_96M:
384  stolen_size = MB(96);
385  break;
386  case SNB_GMCH_GMS_STOLEN_128M:
387  stolen_size = MB(128);
388  break;
389  case SNB_GMCH_GMS_STOLEN_160M:
390  stolen_size = MB(160);
391  break;
392  case SNB_GMCH_GMS_STOLEN_192M:
393  stolen_size = MB(192);
394  break;
395  case SNB_GMCH_GMS_STOLEN_224M:
396  stolen_size = MB(224);
397  break;
398  case SNB_GMCH_GMS_STOLEN_256M:
399  stolen_size = MB(256);
400  break;
401  case SNB_GMCH_GMS_STOLEN_288M:
402  stolen_size = MB(288);
403  break;
404  case SNB_GMCH_GMS_STOLEN_320M:
405  stolen_size = MB(320);
406  break;
407  case SNB_GMCH_GMS_STOLEN_352M:
408  stolen_size = MB(352);
409  break;
410  case SNB_GMCH_GMS_STOLEN_384M:
411  stolen_size = MB(384);
412  break;
413  case SNB_GMCH_GMS_STOLEN_416M:
414  stolen_size = MB(416);
415  break;
416  case SNB_GMCH_GMS_STOLEN_448M:
417  stolen_size = MB(448);
418  break;
419  case SNB_GMCH_GMS_STOLEN_480M:
420  stolen_size = MB(480);
421  break;
422  case SNB_GMCH_GMS_STOLEN_512M:
423  stolen_size = MB(512);
424  break;
425  }
426  } else {
427  switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
428  case I855_GMCH_GMS_STOLEN_1M:
429  stolen_size = MB(1);
430  break;
431  case I855_GMCH_GMS_STOLEN_4M:
432  stolen_size = MB(4);
433  break;
434  case I855_GMCH_GMS_STOLEN_8M:
435  stolen_size = MB(8);
436  break;
437  case I855_GMCH_GMS_STOLEN_16M:
438  stolen_size = MB(16);
439  break;
440  case I855_GMCH_GMS_STOLEN_32M:
441  stolen_size = MB(32);
442  break;
443  case I915_GMCH_GMS_STOLEN_48M:
444  stolen_size = MB(48);
445  break;
446  case I915_GMCH_GMS_STOLEN_64M:
447  stolen_size = MB(64);
448  break;
449  case G33_GMCH_GMS_STOLEN_128M:
450  stolen_size = MB(128);
451  break;
452  case G33_GMCH_GMS_STOLEN_256M:
453  stolen_size = MB(256);
454  break;
455  case INTEL_GMCH_GMS_STOLEN_96M:
456  stolen_size = MB(96);
457  break;
458  case INTEL_GMCH_GMS_STOLEN_160M:
459  stolen_size = MB(160);
460  break;
461  case INTEL_GMCH_GMS_STOLEN_224M:
462  stolen_size = MB(224);
463  break;
464  case INTEL_GMCH_GMS_STOLEN_352M:
465  stolen_size = MB(352);
466  break;
467  default:
468  stolen_size = 0;
469  break;
470  }
471  }
472 
473  if (stolen_size > 0) {
474  dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
475  stolen_size / KB(1), local ? "local" : "stolen");
476  } else {
477  dev_info(&intel_private.bridge_dev->dev,
478  "no pre-allocated video memory detected\n");
479  stolen_size = 0;
480  }
481 
482  return stolen_size;
483 }
484 
485 static void i965_adjust_pgetbl_size(unsigned int size_flag)
486 {
487  u32 pgetbl_ctl, pgetbl_ctl2;
488 
489  /* ensure that ppgtt is disabled */
490  pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
491  pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
492  writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
493 
494  /* write the new ggtt size */
495  pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
496  pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
497  pgetbl_ctl |= size_flag;
498  writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
499 }
500 
501 static unsigned int i965_gtt_total_entries(void)
502 {
503  int size;
504  u32 pgetbl_ctl;
505  u16 gmch_ctl;
506 
507  pci_read_config_word(intel_private.bridge_dev,
508  I830_GMCH_CTRL, &gmch_ctl);
509 
510  if (INTEL_GTT_GEN == 5) {
511  switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
512  case G4x_GMCH_SIZE_1M:
513  case G4x_GMCH_SIZE_VT_1M:
514  i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
515  break;
516  case G4x_GMCH_SIZE_VT_1_5M:
517  i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
518  break;
519  case G4x_GMCH_SIZE_2M:
520  case G4x_GMCH_SIZE_VT_2M:
521  i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
522  break;
523  }
524  }
525 
526  pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
527 
528  switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
529  case I965_PGETBL_SIZE_128KB:
530  size = KB(128);
531  break;
532  case I965_PGETBL_SIZE_256KB:
533  size = KB(256);
534  break;
535  case I965_PGETBL_SIZE_512KB:
536  size = KB(512);
537  break;
538  /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
539  case I965_PGETBL_SIZE_1MB:
540  size = KB(1024);
541  break;
542  case I965_PGETBL_SIZE_2MB:
543  size = KB(2048);
544  break;
545  case I965_PGETBL_SIZE_1_5MB:
546  size = KB(1024 + 512);
547  break;
548  default:
549  dev_info(&intel_private.pcidev->dev,
550  "unknown page table size, assuming 512KB\n");
551  size = KB(512);
552  }
553 
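 /* each GTT entry (PTE) is 4 bytes, hence the division by 4 */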
554  return size/4;
555 }
556 
557 static unsigned int intel_gtt_total_entries(void)
558 {
559  int size;
560 
561  if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
562  return i965_gtt_total_entries();
563  else if (INTEL_GTT_GEN == 6) {
564  u16 snb_gmch_ctl;
565 
566  pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
567  switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
568  default:
569  case SNB_GTT_SIZE_0M:
570  printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
571  size = MB(0);
572  break;
573  case SNB_GTT_SIZE_1M:
574  size = MB(1);
575  break;
576  case SNB_GTT_SIZE_2M:
577  size = MB(2);
578  break;
579  }
580  return size/4;
581  } else {
582  /* On previous hardware, the GTT size was just what was
583  * required to map the aperture.
584  */
585  return intel_private.base.gtt_mappable_entries;
586  }
587 }
588 
589 static unsigned int intel_gtt_mappable_entries(void)
590 {
591  unsigned int aperture_size;
592 
593  if (INTEL_GTT_GEN == 1) {
594  u32 smram_miscc;
595 
596  pci_read_config_dword(intel_private.bridge_dev,
597  I810_SMRAM_MISCC, &smram_miscc);
598 
599  if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
600  == I810_GFX_MEM_WIN_32M)
601  aperture_size = MB(32);
602  else
603  aperture_size = MB(64);
604  } else if (INTEL_GTT_GEN == 2) {
605  u16 gmch_ctrl;
606 
607  pci_read_config_word(intel_private.bridge_dev,
608  I830_GMCH_CTRL, &gmch_ctrl);
609 
610  if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
611  aperture_size = MB(64);
612  else
613  aperture_size = MB(128);
614  } else {
615  /* 9xx supports large sizes, just look at the length */
616  aperture_size = pci_resource_len(intel_private.pcidev, 2);
617  }
618 
619  return aperture_size >> PAGE_SHIFT;
620 }
621 
622 static void intel_gtt_teardown_scratch_page(void)
623 {
624  set_pages_wb(intel_private.scratch_page, 1);
625  pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
626  PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
627  put_page(intel_private.scratch_page);
628  __free_page(intel_private.scratch_page);
629 }
630 
631 static void intel_gtt_cleanup(void)
632 {
633  intel_private.driver->cleanup();
634 
635  iounmap(intel_private.gtt);
636  iounmap(intel_private.registers);
637 
638  intel_gtt_teardown_scratch_page();
639 }
640 
641 static int intel_gtt_init(void)
642 {
643  u32 gma_addr;
644  u32 gtt_map_size;
645  int ret;
646 
647  ret = intel_private.driver->setup();
648  if (ret != 0)
649  return ret;
650 
651  intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
652  intel_private.base.gtt_total_entries = intel_gtt_total_entries();
653 
654  /* save the PGETBL reg for resume */
655  intel_private.PGETBL_save =
656  readl(intel_private.registers+I810_PGETBL_CTL)
657  & ~(I810_PGETBL_SIZE_MASK | I965_PGETBL_SIZE_MASK);
658  /* we only ever restore the register when enabling the PGTBL... */
659  if (HAS_PGTBL_EN)
660  intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
661 
662  dev_info(&intel_private.bridge_dev->dev,
663  "detected gtt size: %dK total, %dK mappable\n",
664  intel_private.base.gtt_total_entries * 4,
665  intel_private.base.gtt_mappable_entries * 4);
666 
667  gtt_map_size = intel_private.base.gtt_total_entries * 4;
668 
669  intel_private.gtt = NULL;
670  if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
671  intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
672  gtt_map_size);
673  if (intel_private.gtt == NULL)
674  intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
675  gtt_map_size);
676  if (intel_private.gtt == NULL) {
677  intel_private.driver->cleanup();
678  iounmap(intel_private.registers);
679  return -ENOMEM;
680  }
681  intel_private.base.gtt = intel_private.gtt;
682 
683  global_cache_flush(); /* FIXME: ? */
684 
685  intel_private.base.stolen_size = intel_gtt_stolen_size();
686 
687  intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
688 
689  ret = intel_gtt_setup_scratch_page();
690  if (ret != 0) {
691  intel_gtt_cleanup();
692  return ret;
693  }
694 
695  if (INTEL_GTT_GEN <= 2)
696  pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
697  &gma_addr);
698  else
699  pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
700  &gma_addr);
701 
702  intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
703 
704  return 0;
705 }
706 
707 static int intel_fake_agp_fetch_size(void)
708 {
709  int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
710  unsigned int aper_size;
711  int i;
712 
713  aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
714  / MB(1);
715 
716  for (i = 0; i < num_sizes; i++) {
717  if (aper_size == intel_fake_agp_sizes[i].size) {
718  agp_bridge->current_size =
719  (void *) (intel_fake_agp_sizes + i);
720  return aper_size;
721  }
722  }
723 
724  return 0;
725 }
726 
727 static void i830_cleanup(void)
728 {
729 }
730 
731 /* The chipset_flush interface needs to get data that has already been
732  * flushed out of the CPU all the way out to main memory, because the GPU
733  * doesn't snoop those buffers.
734  *
735  * The 8xx series doesn't have the same lovely interface for flushing the
736  * chipset write buffers that the later chips do. According to the 865
737  * specs, it's 64 octwords, or 1KB. So, to get those previous things in
738  * that buffer out, we just fill 1KB and clflush it out, on the assumption
739  * that it'll push whatever was in there out. It appears to work.
740  */
741 static void i830_chipset_flush(void)
742 {
743  unsigned long timeout = jiffies + msecs_to_jiffies(1000);
744 
745  /* Forcibly evict everything from the CPU write buffers.
746  * clflush appears to be insufficient.
747  */
748  wbinvd_on_all_cpus();
749 
750  /* So far we've only seen documentation for this magic bit on the 855GM;
751  * we hope it exists for the other gen2 chipsets...
752  *
753  * Also works as advertised on my 845G.
754  */
755  writel(readl(intel_private.registers+I830_HIC) | (1<<31),
756  intel_private.registers+I830_HIC);
757 
758  while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
759  if (time_after(jiffies, timeout))
760  break;
761 
762  udelay(50);
763  }
764 }
765 
766 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
767  unsigned int flags)
768 {
769  u32 pte_flags = I810_PTE_VALID;
770 
771  if (flags == AGP_USER_CACHED_MEMORY)
772  pte_flags |= I830_PTE_SYSTEM_CACHED;
773 
774  writel(addr | pte_flags, intel_private.gtt + entry);
775 }
776 
777 bool intel_enable_gtt(void)
778 {
779  u8 __iomem *reg;
780 
781  if (INTEL_GTT_GEN >= 6)
782  return true;
783 
784  if (INTEL_GTT_GEN == 2) {
785  u16 gmch_ctrl;
786 
787  pci_read_config_word(intel_private.bridge_dev,
788  I830_GMCH_CTRL, &gmch_ctrl);
789  gmch_ctrl |= I830_GMCH_ENABLED;
790  pci_write_config_word(intel_private.bridge_dev,
791  I830_GMCH_CTRL, gmch_ctrl);
792 
793  pci_read_config_word(intel_private.bridge_dev,
794  I830_GMCH_CTRL, &gmch_ctrl);
795  if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
796  dev_err(&intel_private.pcidev->dev,
797  "failed to enable the GTT: GMCH_CTRL=%x\n",
798  gmch_ctrl);
799  return false;
800  }
801  }
802 
803  /* On the resume path we may be adjusting the PGTBL value, so
804  * be paranoid and flush all chipset write buffers...
805  */
806  if (INTEL_GTT_GEN >= 3)
807  writel(0, intel_private.registers+GFX_FLSH_CNTL);
808 
809  reg = intel_private.registers+I810_PGETBL_CTL;
810  writel(intel_private.PGETBL_save, reg);
811  if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
812  dev_err(&intel_private.pcidev->dev,
813  "failed to enable the GTT: PGETBL=%x [expected %x]\n",
814  readl(reg), intel_private.PGETBL_save);
815  return false;
816  }
817 
818  if (INTEL_GTT_GEN >= 3)
819  writel(0, intel_private.registers+GFX_FLSH_CNTL);
820 
821  return true;
822 }
823 EXPORT_SYMBOL(intel_enable_gtt);
824 
825 static int i830_setup(void)
826 {
827  u32 reg_addr;
828 
829  pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
830  reg_addr &= 0xfff80000;
831 
832  intel_private.registers = ioremap(reg_addr, KB(64));
833  if (!intel_private.registers)
834  return -ENOMEM;
835 
836  intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
837 
838  return 0;
839 }
840 
841 static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
842 {
843  agp_bridge->gatt_table_real = NULL;
844  agp_bridge->gatt_table = NULL;
845  agp_bridge->gatt_bus_addr = 0;
846 
847  return 0;
848 }
849 
850 static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
851 {
852  return 0;
853 }
854 
855 static int intel_fake_agp_configure(void)
856 {
857  if (!intel_enable_gtt())
858  return -EIO;
859 
860  intel_private.clear_fake_agp = true;
861  agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
862 
863  return 0;
864 }
865 
866 static bool i830_check_flags(unsigned int flags)
867 {
868  switch (flags) {
869  case 0:
870  case AGP_PHYS_MEMORY:
871  case AGP_USER_CACHED_MEMORY:
872  case AGP_USER_MEMORY:
873  return true;
874  }
875 
876  return false;
877 }
878 
879 void intel_gtt_insert_sg_entries(struct sg_table *st,
880  unsigned int pg_start,
881  unsigned int flags)
882 {
883  struct scatterlist *sg;
884  unsigned int len, m;
885  int i, j;
886 
887  j = pg_start;
888 
889  /* sg may merge pages, but we have to separate
890  * per-page addr for GTT */
891  for_each_sg(st->sgl, sg, st->nents, i) {
892  len = sg_dma_len(sg) >> PAGE_SHIFT;
893  for (m = 0; m < len; m++) {
894  dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
895  intel_private.driver->write_entry(addr, j, flags);
896  j++;
897  }
898  }
899  readl(intel_private.gtt+j-1);
900 }
901 EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
902 
903 static void intel_gtt_insert_pages(unsigned int first_entry,
904  unsigned int num_entries,
905  struct page **pages,
906  unsigned int flags)
907 {
908  int i, j;
909 
910  for (i = 0, j = first_entry; i < num_entries; i++, j++) {
911  dma_addr_t addr = page_to_phys(pages[i]);
912  intel_private.driver->write_entry(addr,
913  j, flags);
914  }
915  readl(intel_private.gtt+j-1);
916 }
917 
918 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
919  off_t pg_start, int type)
920 {
921  int ret = -EINVAL;
922 
923  if (intel_private.base.do_idle_maps)
924  return -ENODEV;
925 
926  if (intel_private.clear_fake_agp) {
927  int start = intel_private.base.stolen_size / PAGE_SIZE;
928  int end = intel_private.base.gtt_mappable_entries;
929  intel_gtt_clear_range(start, end - start);
930  intel_private.clear_fake_agp = false;
931  }
932 
933  if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
934  return i810_insert_dcache_entries(mem, pg_start, type);
935 
936  if (mem->page_count == 0)
937  goto out;
938 
939  if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
940  goto out_err;
941 
942  if (type != mem->type)
943  goto out_err;
944 
945  if (!intel_private.driver->check_flags(type))
946  goto out_err;
947 
948  if (!mem->is_flushed)
949  global_cache_flush();
950 
951  if (intel_private.base.needs_dmar) {
952  struct sg_table st;
953 
954  ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
955  if (ret != 0)
956  return ret;
957 
958  intel_gtt_insert_sg_entries(&st, pg_start, type);
959  mem->sg_list = st.sgl;
960  mem->num_sg = st.nents;
961  } else
962  intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
963  type);
964 
965 out:
966  ret = 0;
967 out_err:
968  mem->is_flushed = true;
969  return ret;
970 }
971 
972 void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
973 {
974  unsigned int i;
975 
976  for (i = first_entry; i < (first_entry + num_entries); i++) {
977  intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
978  i, 0);
979  }
980  readl(intel_private.gtt+i-1);
981 }
982 EXPORT_SYMBOL(intel_gtt_clear_range);
983 
984 static int intel_fake_agp_remove_entries(struct agp_memory *mem,
985  off_t pg_start, int type)
986 {
987  if (mem->page_count == 0)
988  return 0;
989 
990  if (intel_private.base.do_idle_maps)
991  return -ENODEV;
992 
993  intel_gtt_clear_range(pg_start, mem->page_count);
994 
995  if (intel_private.base.needs_dmar) {
996  intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
997  mem->sg_list = NULL;
998  mem->num_sg = 0;
999  }
1000 
1001  return 0;
1002 }
1003 
1004 static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
1005  int type)
1006 {
1007  struct agp_memory *new;
1008 
1009  if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
1010  if (pg_count != intel_private.num_dcache_entries)
1011  return NULL;
1012 
1013  new = agp_create_memory(1);
1014  if (new == NULL)
1015  return NULL;
1016 
1017  new->type = AGP_DCACHE_MEMORY;
1018  new->page_count = pg_count;
1019  new->num_scratch_pages = 0;
1020  agp_free_page_array(new);
1021  return new;
1022  }
1023  if (type == AGP_PHYS_MEMORY)
1024  return alloc_agpphysmem_i8xx(pg_count, type);
1025  /* always return NULL for other allocation types for now */
1026  return NULL;
1027 }
1028 
1029 static int intel_alloc_chipset_flush_resource(void)
1030 {
1031  int ret;
1032  ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1033  PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1034  pcibios_align_resource, intel_private.bridge_dev);
1035 
1036  return ret;
1037 }
1038 
1039 static void intel_i915_setup_chipset_flush(void)
1040 {
1041  int ret;
1042  u32 temp;
1043 
1044  pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1045  if (!(temp & 0x1)) {
1046  intel_alloc_chipset_flush_resource();
1047  intel_private.resource_valid = 1;
1048  pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1049  } else {
1050  temp &= ~1;
1051 
1052  intel_private.resource_valid = 1;
1053  intel_private.ifp_resource.start = temp;
1054  intel_private.ifp_resource.end = temp + PAGE_SIZE;
1055  ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1056  /* some BIOSes reserve this area in a pnp resource, some don't */
1057  if (ret)
1058  intel_private.resource_valid = 0;
1059  }
1060 }
1061 
1062 static void intel_i965_g33_setup_chipset_flush(void)
1063 {
1064  u32 temp_hi, temp_lo;
1065  int ret;
1066 
1067  pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
1068  pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1069 
1070  if (!(temp_lo & 0x1)) {
1071 
1072  intel_alloc_chipset_flush_resource();
1073 
1074  intel_private.resource_valid = 1;
1075  pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
1076  upper_32_bits(intel_private.ifp_resource.start));
1077  pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1078  } else {
1079  u64 l64;
1080 
1081  temp_lo &= ~0x1;
1082  l64 = ((u64)temp_hi << 32) | temp_lo;
1083 
1084  intel_private.resource_valid = 1;
1085  intel_private.ifp_resource.start = l64;
1086  intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1087  ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1089  /* some BIOSes reserve this area in a pnp resource, some don't */
1089  if (ret)
1090  intel_private.resource_valid = 0;
1091  }
1092 }
1093 
1094 static void intel_i9xx_setup_flush(void)
1095 {
1096  /* return if already configured */
1097  if (intel_private.ifp_resource.start)
1098  return;
1099 
1100  if (INTEL_GTT_GEN == 6)
1101  return;
1102 
1103  /* setup a resource for this object */
1104  intel_private.ifp_resource.name = "Intel Flush Page";
1105  intel_private.ifp_resource.flags = IORESOURCE_MEM;
1106 
1107  /* Setup chipset flush for 915 */
1108  if (IS_G33 || INTEL_GTT_GEN >= 4) {
1109  intel_i965_g33_setup_chipset_flush();
1110  } else {
1111  intel_i915_setup_chipset_flush();
1112  }
1113 
1114  if (intel_private.ifp_resource.start)
1115  intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1116  if (!intel_private.i9xx_flush_page)
1117  dev_err(&intel_private.pcidev->dev,
1118  "can't ioremap flush page - no chipset flushing\n");
1119 }
1120 
1121 static void i9xx_cleanup(void)
1122 {
1123  if (intel_private.i9xx_flush_page)
1124  iounmap(intel_private.i9xx_flush_page);
1125  if (intel_private.resource_valid)
1126  release_resource(&intel_private.ifp_resource);
1127  intel_private.ifp_resource.start = 0;
1128  intel_private.resource_valid = 0;
1129 }
1130 
1131 static void i9xx_chipset_flush(void)
1132 {
1133  if (intel_private.i9xx_flush_page)
1134  writel(1, intel_private.i9xx_flush_page);
1135 }
1136 
1137 static void i965_write_entry(dma_addr_t addr,
1138  unsigned int entry,
1139  unsigned int flags)
1140 {
1141  u32 pte_flags;
1142 
1143  pte_flags = I810_PTE_VALID;
1144  if (flags == AGP_USER_CACHED_MEMORY)
1145  pte_flags |= I830_PTE_SYSTEM_CACHED;
1146 
1147  /* shift high bits down: i965 stores physical address bits 35:32 in PTE bits 7:4 */
1148  addr |= (addr >> 28) & 0xf0;
1149  writel(addr | pte_flags, intel_private.gtt + entry);
1150 }
1151 
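/* gen6+ is only used by gem, so the PTE flags no longer need to match the
 * old agp memory types and any value is acceptable. */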
1152 static bool gen6_check_flags(unsigned int flags)
1153 {
1154  return true;
1155 }
1156 
1157 static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
1158  unsigned int flags)
1159 {
1160  unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1161  unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1162  u32 pte_flags;
1163 
1164  if (type_mask == AGP_USER_MEMORY)
1165  pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
1166  else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1167  pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1168  if (gfdt)
1169  pte_flags |= GEN6_PTE_GFDT;
1170  } else { /* set 'normal'/'cached' to LLC by default */
1171  pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1172  if (gfdt)
1173  pte_flags |= GEN6_PTE_GFDT;
1174  }
1175 
1176  /* gen6 has bit11-4 for physical addr bit39-32 */
1177  addr |= (addr >> 28) & 0xff0;
1178  writel(addr | pte_flags, intel_private.gtt + entry);
1179 }
1180 
1181 static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1182  unsigned int flags)
1183 {
1184  unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1185  unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1186  u32 pte_flags;
1187 
1188  if (type_mask == AGP_USER_MEMORY)
1189  pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1190  else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1191  pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1192  if (gfdt)
1193  pte_flags |= GEN6_PTE_GFDT;
1194  } else { /* set 'normal'/'cached' to LLC by default */
1195  pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1196  if (gfdt)
1197  pte_flags |= GEN6_PTE_GFDT;
1198  }
1199 
1200  /* gen6 has bit11-4 for physical addr bit39-32 */
1201  addr |= (addr >> 28) & 0xff0;
1202  writel(addr | pte_flags, intel_private.gtt + entry);
1203 }
1204 
1205 static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
1206  unsigned int flags)
1207 {
1208  unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1209  unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1210  u32 pte_flags;
1211 
1212  if (type_mask == AGP_USER_MEMORY)
1213  pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1214  else {
1215  pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1216  if (gfdt)
1217  pte_flags |= GEN6_PTE_GFDT;
1218  }
1219 
1220  /* gen6 has bit11-4 for physical addr bit39-32 */
1221  addr |= (addr >> 28) & 0xff0;
1222  writel(addr | pte_flags, intel_private.gtt + entry);
1223 
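 /* valleyview wires up no chipset_flush hook; instead, flush the chipset
 * write buffers after every PTE update. */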
1224  writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
1225 }
1226 
1227 static void gen6_cleanup(void)
1228 {
1229 }
1230 
1231 /* Certain Gen5 chipsets require idling the GPU before
1232  * unmapping anything from the GTT when VT-d is enabled.
1233  */
1234 static inline int needs_idle_maps(void)
1235 {
1236 #ifdef CONFIG_INTEL_IOMMU
1237  const unsigned short gpu_devid = intel_private.pcidev->device;
1238 
1239  /* Query intel_iommu to see if we need the workaround. Presumably that
1240  * was loaded first.
1241  */
1242  if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
1243  gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
1244  intel_iommu_gfx_mapped)
1245  return 1;
1246 #endif
1247  return 0;
1248 }
1249 
1250 static int i9xx_setup(void)
1251 {
1252  u32 reg_addr;
1253  int size = KB(512);
1254 
1255  pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
1256 
1257  reg_addr &= 0xfff80000;
1258 
1259  if (INTEL_GTT_GEN >= 7)
1260  size = MB(2);
1261 
1262  intel_private.registers = ioremap(reg_addr, size);
1263  if (!intel_private.registers)
1264  return -ENOMEM;
1265 
1266  if (INTEL_GTT_GEN == 3) {
1267  u32 gtt_addr;
1268 
1269  pci_read_config_dword(intel_private.pcidev,
1270  I915_PTEADDR, &gtt_addr);
1271  intel_private.gtt_bus_addr = gtt_addr;
1272  } else {
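 /* gen4+ has no separate PTEADDR register; the GTT lives in the MMIO BAR
 * at a fixed offset. */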
1273  u32 gtt_offset;
1274 
1275  switch (INTEL_GTT_GEN) {
1276  case 5:
1277  case 6:
1278  case 7:
1279  gtt_offset = MB(2);
1280  break;
1281  case 4:
1282  default:
1283  gtt_offset = KB(512);
1284  break;
1285  }
1286  intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1287  }
1288 
1289  if (needs_idle_maps())
1290  intel_private.base.do_idle_maps = 1;
1291 
1292  intel_i9xx_setup_flush();
1293 
1294  return 0;
1295 }
1296 
1297 static const struct agp_bridge_driver intel_fake_agp_driver = {
1298  .owner = THIS_MODULE,
1299  .size_type = FIXED_APER_SIZE,
1300  .aperture_sizes = intel_fake_agp_sizes,
1301  .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1302  .configure = intel_fake_agp_configure,
1303  .fetch_size = intel_fake_agp_fetch_size,
1304  .cleanup = intel_gtt_cleanup,
1305  .agp_enable = intel_fake_agp_enable,
1306  .cache_flush = global_cache_flush,
1307  .create_gatt_table = intel_fake_agp_create_gatt_table,
1308  .free_gatt_table = intel_fake_agp_free_gatt_table,
1309  .insert_memory = intel_fake_agp_insert_entries,
1310  .remove_memory = intel_fake_agp_remove_entries,
1311  .alloc_by_type = intel_fake_agp_alloc_by_type,
1312  .free_by_type = intel_i810_free_by_type,
1313  .agp_alloc_page = agp_generic_alloc_page,
1314  .agp_alloc_pages = agp_generic_alloc_pages,
1315  .agp_destroy_page = agp_generic_destroy_page,
1316  .agp_destroy_pages = agp_generic_destroy_pages,
1317 };
1318 
1319 static const struct intel_gtt_driver i81x_gtt_driver = {
1320  .gen = 1,
1321  .has_pgtbl_enable = 1,
1322  .dma_mask_size = 32,
1323  .setup = i810_setup,
1324  .cleanup = i810_cleanup,
1325  .check_flags = i830_check_flags,
1326  .write_entry = i810_write_entry,
1327 };
1328 static const struct intel_gtt_driver i8xx_gtt_driver = {
1329  .gen = 2,
1330  .has_pgtbl_enable = 1,
1331  .setup = i830_setup,
1332  .cleanup = i830_cleanup,
1333  .write_entry = i830_write_entry,
1334  .dma_mask_size = 32,
1335  .check_flags = i830_check_flags,
1336  .chipset_flush = i830_chipset_flush,
1337 };
1338 static const struct intel_gtt_driver i915_gtt_driver = {
1339  .gen = 3,
1340  .has_pgtbl_enable = 1,
1341  .setup = i9xx_setup,
1342  .cleanup = i9xx_cleanup,
1343  /* i945 is the last gpu to need phys mem (for overlay and cursors). */
1344  .write_entry = i830_write_entry,
1345  .dma_mask_size = 32,
1346  .check_flags = i830_check_flags,
1347  .chipset_flush = i9xx_chipset_flush,
1348 };
1349 static const struct intel_gtt_driver g33_gtt_driver = {
1350  .gen = 3,
1351  .is_g33 = 1,
1352  .setup = i9xx_setup,
1353  .cleanup = i9xx_cleanup,
1354  .write_entry = i965_write_entry,
1355  .dma_mask_size = 36,
1356  .check_flags = i830_check_flags,
1357  .chipset_flush = i9xx_chipset_flush,
1358 };
1359 static const struct intel_gtt_driver pineview_gtt_driver = {
1360  .gen = 3,
1361  .is_pineview = 1, .is_g33 = 1,
1362  .setup = i9xx_setup,
1363  .cleanup = i9xx_cleanup,
1364  .write_entry = i965_write_entry,
1365  .dma_mask_size = 36,
1366  .check_flags = i830_check_flags,
1367  .chipset_flush = i9xx_chipset_flush,
1368 };
1369 static const struct intel_gtt_driver i965_gtt_driver = {
1370  .gen = 4,
1371  .has_pgtbl_enable = 1,
1372  .setup = i9xx_setup,
1373  .cleanup = i9xx_cleanup,
1374  .write_entry = i965_write_entry,
1375  .dma_mask_size = 36,
1376  .check_flags = i830_check_flags,
1377  .chipset_flush = i9xx_chipset_flush,
1378 };
1379 static const struct intel_gtt_driver g4x_gtt_driver = {
1380  .gen = 5,
1381  .setup = i9xx_setup,
1382  .cleanup = i9xx_cleanup,
1383  .write_entry = i965_write_entry,
1384  .dma_mask_size = 36,
1385  .check_flags = i830_check_flags,
1386  .chipset_flush = i9xx_chipset_flush,
1387 };
1388 static const struct intel_gtt_driver ironlake_gtt_driver = {
1389  .gen = 5,
1390  .is_ironlake = 1,
1391  .setup = i9xx_setup,
1392  .cleanup = i9xx_cleanup,
1393  .write_entry = i965_write_entry,
1394  .dma_mask_size = 36,
1395  .check_flags = i830_check_flags,
1396  .chipset_flush = i9xx_chipset_flush,
1397 };
1398 static const struct intel_gtt_driver sandybridge_gtt_driver = {
1399  .gen = 6,
1400  .setup = i9xx_setup,
1401  .cleanup = gen6_cleanup,
1402  .write_entry = gen6_write_entry,
1403  .dma_mask_size = 40,
1404  .check_flags = gen6_check_flags,
1405  .chipset_flush = i9xx_chipset_flush,
1406 };
1407 static const struct intel_gtt_driver haswell_gtt_driver = {
1408  .gen = 6,
1409  .setup = i9xx_setup,
1410  .cleanup = gen6_cleanup,
1411  .write_entry = haswell_write_entry,
1412  .dma_mask_size = 40,
1413  .check_flags = gen6_check_flags,
1414  .chipset_flush = i9xx_chipset_flush,
1415 };
1416 static const struct intel_gtt_driver valleyview_gtt_driver = {
1417  .gen = 7,
1418  .setup = i9xx_setup,
1419  .cleanup = gen6_cleanup,
1420  .write_entry = valleyview_write_entry,
1421  .dma_mask_size = 40,
1422  .check_flags = gen6_check_flags,
1423 };
1424 
1425 /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
1426  * driver and gmch_driver must be non-null, and find_gmch will determine
1427  * which one should be used if a gmch_chip_id is present.
1428  */
1429 static const struct intel_gtt_driver_description {
1430  unsigned int gmch_chip_id;
1431  char *name;
1432  const struct intel_gtt_driver *gtt_driver;
1433 } intel_gtt_chipsets[] = {
1434  { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
1435  &i81x_gtt_driver},
1436  { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
1437  &i81x_gtt_driver},
1438  { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
1439  &i81x_gtt_driver},
1440  { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
1441  &i81x_gtt_driver},
1442  { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1443  &i8xx_gtt_driver},
1444  { PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
1445  &i8xx_gtt_driver},
1446  { PCI_DEVICE_ID_INTEL_82854_IG, "854",
1447  &i8xx_gtt_driver},
1448  { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
1449  &i8xx_gtt_driver},
1450  { PCI_DEVICE_ID_INTEL_82865_IG, "865",
1451  &i8xx_gtt_driver},
1452  { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1453  &i915_gtt_driver },
1454  { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1455  &i915_gtt_driver },
1456  { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1457  &i915_gtt_driver },
1458  { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1459  &i915_gtt_driver },
1460  { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1461  &i915_gtt_driver },
1462  { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1463  &i915_gtt_driver },
1464  { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1465  &i965_gtt_driver },
1466  { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1467  &i965_gtt_driver },
1468  { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1469  &i965_gtt_driver },
1470  { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1471  &i965_gtt_driver },
1472  { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1473  &i965_gtt_driver },
1474  { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1475  &i965_gtt_driver },
1476  { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1477  &g33_gtt_driver },
1478  { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1479  &g33_gtt_driver },
1480  { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1481  &g33_gtt_driver },
1482  { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1483  &pineview_gtt_driver },
1484  { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1485  &pineview_gtt_driver },
1486  { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1487  &g4x_gtt_driver },
1488  { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1489  &g4x_gtt_driver },
1490  { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1491  &g4x_gtt_driver },
1492  { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1493  &g4x_gtt_driver },
1494  { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1495  &g4x_gtt_driver },
1496  { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1497  &g4x_gtt_driver },
1498  { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1499  &g4x_gtt_driver },
1500  { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1501  "HD Graphics", &ironlake_gtt_driver },
1502  { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1503  "HD Graphics", &ironlake_gtt_driver },
1504  { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
1505  "Sandybridge", &sandybridge_gtt_driver },
1506  { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
1507  "Sandybridge", &sandybridge_gtt_driver },
1508  { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
1509  "Sandybridge", &sandybridge_gtt_driver },
1510  { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
1511  "Sandybridge", &sandybridge_gtt_driver },
1512  { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
1513  "Sandybridge", &sandybridge_gtt_driver },
1514  { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
1515  "Sandybridge", &sandybridge_gtt_driver },
1516  { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
1517  "Sandybridge", &sandybridge_gtt_driver },
1518  { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
1519  "Ivybridge", &sandybridge_gtt_driver },
1520  { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
1521  "Ivybridge", &sandybridge_gtt_driver },
1522  { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
1523  "Ivybridge", &sandybridge_gtt_driver },
1524  { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
1525  "Ivybridge", &sandybridge_gtt_driver },
1526  { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
1527  "Ivybridge", &sandybridge_gtt_driver },
1528  { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
1529  "Ivybridge", &sandybridge_gtt_driver },
1530  { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
1531  "ValleyView", &valleyview_gtt_driver },
1532  { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
1533  "Haswell", &haswell_gtt_driver },
1534  { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
1535  "Haswell", &haswell_gtt_driver },
1536  { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
1537  "Haswell", &haswell_gtt_driver },
1538  { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
1539  "Haswell", &haswell_gtt_driver },
1540  { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
1541  "Haswell", &haswell_gtt_driver },
1542  { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
1543  "Haswell", &haswell_gtt_driver },
1544  { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
1545  "Haswell", &haswell_gtt_driver },
1546  { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
1547  "Haswell", &haswell_gtt_driver },
1548  { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
1549  "Haswell", &haswell_gtt_driver },
1550  { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
1551  "Haswell", &haswell_gtt_driver },
1552  { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
1553  "Haswell", &haswell_gtt_driver },
1554  { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
1555  "Haswell", &haswell_gtt_driver },
1556  { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
1557  "Haswell", &haswell_gtt_driver },
1558  { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
1559  "Haswell", &haswell_gtt_driver },
1560  { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
1561  "Haswell", &haswell_gtt_driver },
1562  { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
1563  "Haswell", &haswell_gtt_driver },
1564  { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
1565  "Haswell", &haswell_gtt_driver },
1566  { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
1567  "Haswell", &haswell_gtt_driver },
1568  { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
1569  "Haswell", &haswell_gtt_driver },
1570  { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
1571  "Haswell", &haswell_gtt_driver },
1572  { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
1573  "Haswell", &haswell_gtt_driver },
1574  { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
1575  "Haswell", &haswell_gtt_driver },
1576  { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
1577  "Haswell", &haswell_gtt_driver },
1578  { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
1579  "Haswell", &haswell_gtt_driver },
1580  { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
1581  "Haswell", &haswell_gtt_driver },
1582  { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
1583  "Haswell", &haswell_gtt_driver },
1584  { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
1585  "Haswell", &haswell_gtt_driver },
1586  { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
1587  "Haswell", &haswell_gtt_driver },
1588  { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
1589  "Haswell", &haswell_gtt_driver },
1590  { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
1591  "Haswell", &haswell_gtt_driver },
1592  { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
1593  "Haswell", &haswell_gtt_driver },
1594  { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
1595  "Haswell", &haswell_gtt_driver },
1596  { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
1597  "Haswell", &haswell_gtt_driver },
1598  { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
1599  "Haswell", &haswell_gtt_driver },
1600  { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
1601  "Haswell", &haswell_gtt_driver },
1602  { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
1603  "Haswell", &haswell_gtt_driver },
1604  { 0, NULL, NULL }
1605 };
1606 
1607 static int find_gmch(u16 device)
1608 {
1609  struct pci_dev *gmch_device;
1610 
1611  gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1612  if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1613  gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1614  device, gmch_device);
1615  }
1616 
1617  if (!gmch_device)
1618  return 0;
1619 
1620  intel_private.pcidev = gmch_device;
1621  return 1;
1622 }
1623 
1624 int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
1625  struct agp_bridge_data *bridge)
1626 {
1627  int i, mask;
1628 
1629  /*
1630  * Can be called from the fake agp driver but also directly from
1631  * drm/i915.ko. Hence we need to check whether everything is set up
1632  * already.
1633  */
1634  if (intel_private.driver) {
1635  intel_private.refcount++;
1636  return 1;
1637  }
1638 
1639  for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1640  if (gpu_pdev) {
1641  if (gpu_pdev->device ==
1642  intel_gtt_chipsets[i].gmch_chip_id) {
1643  intel_private.pcidev = pci_dev_get(gpu_pdev);
1644  intel_private.driver =
1645  intel_gtt_chipsets[i].gtt_driver;
1646 
1647  break;
1648  }
1649  } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1650  intel_private.driver =
1651  intel_gtt_chipsets[i].gtt_driver;
1652  break;
1653  }
1654  }
1655 
1656  if (!intel_private.driver)
1657  return 0;
1658 
1659  intel_private.refcount++;
1660 
1661  if (bridge) {
1662  bridge->driver = &intel_fake_agp_driver;
1663  bridge->dev_private_data = &intel_private;
1664  bridge->dev = bridge_pdev;
1665  }
1666 
1667  intel_private.bridge_dev = pci_dev_get(bridge_pdev);
1668 
1669  dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1670 
1671  mask = intel_private.driver->dma_mask_size;
1672  if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1673  dev_err(&intel_private.pcidev->dev,
1674  "set gfx device dma mask %d-bit failed!\n", mask);
1675  else
1676  pci_set_consistent_dma_mask(intel_private.pcidev,
1677  DMA_BIT_MASK(mask));
1678 
1679  if (intel_gtt_init() != 0) {
1680  intel_gmch_remove();
1681 
1682  return 0;
1683  }
1684 
1685  return 1;
1686 }
1687 EXPORT_SYMBOL(intel_gmch_probe);
1688 
1689 const struct intel_gtt *intel_gtt_get(void)
1690 {
1691  return &intel_private.base;
1692 }
1693 EXPORT_SYMBOL(intel_gtt_get);
1694 
1695 void intel_gtt_chipset_flush(void)
1696 {
1697  if (intel_private.driver->chipset_flush)
1698  intel_private.driver->chipset_flush();
1699 }
1700 EXPORT_SYMBOL(intel_gtt_chipset_flush);
1701 
1702 void intel_gmch_remove(void)
1703 {
1704  if (--intel_private.refcount)
1705  return;
1706 
1707  if (intel_private.pcidev)
1708  pci_dev_put(intel_private.pcidev);
1709  if (intel_private.bridge_dev)
1710  pci_dev_put(intel_private.bridge_dev);
1711  intel_private.driver = NULL;
1712 }
1713 EXPORT_SYMBOL(intel_gmch_remove);
1714 
1715 MODULE_AUTHOR("Dave Jones <[email protected]>");
1716 MODULE_LICENSE("GPL and additional rights");