18 #include <linux/module.h>
19 #include <linux/pci.h>
21 #include <linux/kernel.h>
36 #ifdef CONFIG_INTEL_IOMMU
/* With an Intel IOMMU potentially active, pages pointed at by GTT
 * entries must be mapped through the PCI DMA API rather than used by
 * raw physical address. */
37 #define USE_PCI_DMA_API 1
/* NOTE(review): the intervening #else (and closing #endif) were lost in
 * extraction; this 0 definition is presumably the !CONFIG_INTEL_IOMMU
 * fallback — confirm against the full file. */
39 #define USE_PCI_DMA_API 0
62 static struct _intel_private {
72 int num_dcache_entries;
77 struct page *scratch_page;
/* Shorthand accessors for properties of the GTT driver currently bound
 * in the file-scope intel_private singleton. */
81 #define INTEL_GTT_GEN intel_private.driver->gen
82 #define IS_G33 intel_private.driver->is_g33
83 #define IS_PINEVIEW intel_private.driver->is_pineview
84 #define IS_IRONLAKE intel_private.driver->is_ironlake
85 #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
87 static int intel_gtt_map_memory(
struct page **
pages,
94 DBG(
"try mapping %lu pages\n", (
unsigned long)num_entries);
102 if (!pci_map_sg(intel_private.pcidev,
116 DBG(
"try unmapping %lu pages\n", (
unsigned long)
mem->page_count);
118 pci_unmap_sg(intel_private.pcidev, sg_list,
122 st.orig_nents =
st.nents = num_sg;
133 static struct page *i8xx_alloc_pages(
void)
151 static void i8xx_destroy_pages(
struct page *page)
162 #define I810_GTT_ORDER 4
163 static int i810_setup(
void)
170 if (gtt_table ==
NULL)
172 intel_private.i81x_gtt_table = gtt_table;
174 pci_read_config_dword(intel_private.pcidev,
I810_MMADDR, ®_addr);
175 reg_addr &= 0xfff80000;
177 intel_private.registers =
ioremap(reg_addr,
KB(64));
178 if (!intel_private.registers)
188 dev_info(&intel_private.pcidev->dev,
189 "detected 4MB dedicated video ram\n");
190 intel_private.num_dcache_entries = 1024;
196 static void i810_cleanup(
void)
208 > intel_private.num_dcache_entries)
214 for (i = pg_start; i < (pg_start + mem->
page_count); i++) {
216 intel_private.driver->write_entry(addr,
219 readl(intel_private.gtt+i-1);
229 static struct agp_memory *alloc_agpphysmem_i8xx(
size_t pg_count,
int type)
239 page = i8xx_alloc_pages();
252 new->pages[0] =
page;
255 new->pages[1] =
new->pages[0] + 1;
256 new->pages[2] =
new->pages[1] + 1;
257 new->pages[3] =
new->pages[2] + 1;
259 new->page_count = pg_count;
260 new->num_scratch_pages = pg_count;
271 i8xx_destroy_pages(curr->
pages[0]);
283 static int intel_gtt_setup_scratch_page(
void)
294 if (intel_private.base.needs_dmar) {
295 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
297 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
300 intel_private.base.scratch_page_dma =
dma_addr;
302 intel_private.base.scratch_page_dma =
page_to_phys(page);
304 intel_private.scratch_page =
page;
323 writel(addr | pte_flags, intel_private.gtt + entry);
334 static unsigned int intel_gtt_stolen_size(
void)
339 static const int ddt[4] = { 0, 16, 32, 64 };
345 pci_read_config_word(intel_private.bridge_dev,
352 stolen_size =
KB(512);
375 pci_read_config_word(intel_private.pcidev,
SNB_GMCH_CTRL, &snb_gmch_ctl);
378 stolen_size =
MB(32);
381 stolen_size =
MB(64);
384 stolen_size =
MB(96);
387 stolen_size =
MB(128);
390 stolen_size =
MB(160);
393 stolen_size =
MB(192);
396 stolen_size =
MB(224);
399 stolen_size =
MB(256);
402 stolen_size =
MB(288);
405 stolen_size =
MB(320);
408 stolen_size =
MB(352);
411 stolen_size =
MB(384);
414 stolen_size =
MB(416);
417 stolen_size =
MB(448);
420 stolen_size =
MB(480);
423 stolen_size =
MB(512);
438 stolen_size =
MB(16);
441 stolen_size =
MB(32);
444 stolen_size =
MB(48);
447 stolen_size =
MB(64);
450 stolen_size =
MB(128);
453 stolen_size =
MB(256);
456 stolen_size =
MB(96);
459 stolen_size =
MB(160);
462 stolen_size =
MB(224);
465 stolen_size =
MB(352);
473 if (stolen_size > 0) {
474 dev_info(&intel_private.bridge_dev->dev,
"detected %dK %s memory\n",
475 stolen_size /
KB(1), local ?
"local" :
"stolen");
477 dev_info(&intel_private.bridge_dev->dev,
478 "no pre-allocated video memory detected\n");
485 static void i965_adjust_pgetbl_size(
unsigned int size_flag)
487 u32 pgetbl_ctl, pgetbl_ctl2;
497 pgetbl_ctl |= size_flag;
501 static unsigned int i965_gtt_total_entries(
void)
507 pci_read_config_word(intel_private.bridge_dev,
546 size =
KB(1024 + 512);
549 dev_info(&intel_private.pcidev->dev,
550 "unknown page table size, assuming 512KB\n");
557 static unsigned int intel_gtt_total_entries(
void)
562 return i965_gtt_total_entries();
566 pci_read_config_word(intel_private.pcidev,
SNB_GMCH_CTRL, &snb_gmch_ctl);
585 return intel_private.base.gtt_mappable_entries;
589 static unsigned int intel_gtt_mappable_entries(
void)
591 unsigned int aperture_size;
596 pci_read_config_dword(intel_private.bridge_dev,
601 aperture_size =
MB(32);
603 aperture_size =
MB(64);
607 pci_read_config_word(intel_private.bridge_dev,
611 aperture_size =
MB(64);
613 aperture_size =
MB(128);
622 static void intel_gtt_teardown_scratch_page(
void)
625 pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
627 put_page(intel_private.scratch_page);
631 static void intel_gtt_cleanup(
void)
633 intel_private.driver->cleanup();
636 iounmap(intel_private.registers);
638 intel_gtt_teardown_scratch_page();
641 static int intel_gtt_init(
void)
647 ret = intel_private.driver->setup();
651 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
652 intel_private.base.gtt_total_entries = intel_gtt_total_entries();
655 intel_private.PGETBL_save =
662 dev_info(&intel_private.bridge_dev->dev,
663 "detected gtt size: %dK total, %dK mappable\n",
664 intel_private.base.gtt_total_entries * 4,
665 intel_private.base.gtt_mappable_entries * 4);
667 gtt_map_size = intel_private.base.gtt_total_entries * 4;
669 intel_private.gtt =
NULL;
670 if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
671 intel_private.gtt =
ioremap_wc(intel_private.gtt_bus_addr,
673 if (intel_private.gtt ==
NULL)
674 intel_private.gtt =
ioremap(intel_private.gtt_bus_addr,
676 if (intel_private.gtt ==
NULL) {
677 intel_private.driver->cleanup();
678 iounmap(intel_private.registers);
681 intel_private.base.gtt = intel_private.gtt;
685 intel_private.base.stolen_size = intel_gtt_stolen_size();
689 ret = intel_gtt_setup_scratch_page();
695 if (INTEL_GTT_GEN <= 2)
696 pci_read_config_dword(intel_private.pcidev,
I810_GMADDR,
699 pci_read_config_dword(intel_private.pcidev,
I915_GMADDR,
707 static int intel_fake_agp_fetch_size(
void)
709 int num_sizes =
ARRAY_SIZE(intel_fake_agp_sizes);
710 unsigned int aper_size;
713 aper_size = (intel_private.base.gtt_mappable_entries <<
PAGE_SHIFT)
716 for (i = 0; i < num_sizes; i++) {
717 if (aper_size == intel_fake_agp_sizes[i].size) {
719 (
void *) (intel_fake_agp_sizes + i);
727 static void i830_cleanup(
void)
741 static void i830_chipset_flush(
void)
766 static void i830_write_entry(
dma_addr_t addr,
unsigned int entry,
774 writel(addr | pte_flags, intel_private.gtt + entry);
781 if (INTEL_GTT_GEN >= 6)
784 if (INTEL_GTT_GEN == 2) {
787 pci_read_config_word(intel_private.bridge_dev,
790 pci_write_config_word(intel_private.bridge_dev,
793 pci_read_config_word(intel_private.bridge_dev,
796 dev_err(&intel_private.pcidev->dev,
797 "failed to enable the GTT: GMCH_CTRL=%x\n",
806 if (INTEL_GTT_GEN >= 3)
810 writel(intel_private.PGETBL_save, reg);
812 dev_err(&intel_private.pcidev->dev,
813 "failed to enable the GTT: PGETBL=%x [expected %x]\n",
814 readl(reg), intel_private.PGETBL_save);
818 if (INTEL_GTT_GEN >= 3)
825 static int i830_setup(
void)
829 pci_read_config_dword(intel_private.pcidev,
I810_MMADDR, ®_addr);
830 reg_addr &= 0xfff80000;
832 intel_private.registers =
ioremap(reg_addr,
KB(64));
833 if (!intel_private.registers)
841 static int intel_fake_agp_create_gatt_table(
struct agp_bridge_data *bridge)
850 static int intel_fake_agp_free_gatt_table(
struct agp_bridge_data *bridge)
855 static int intel_fake_agp_configure(
void)
860 intel_private.clear_fake_agp =
true;
861 agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
866 static bool i830_check_flags(
unsigned int flags)
880 unsigned int pg_start,
893 for (m = 0; m < len; m++) {
895 intel_private.driver->write_entry(addr, j, flags);
899 readl(intel_private.gtt+j-1);
903 static void intel_gtt_insert_pages(
unsigned int first_entry,
910 for (i = 0, j = first_entry; i <
num_entries; i++, j++) {
912 intel_private.driver->write_entry(addr,
915 readl(intel_private.gtt+j-1);
918 static int intel_fake_agp_insert_entries(
struct agp_memory *mem,
919 off_t pg_start,
int type)
923 if (intel_private.base.do_idle_maps)
926 if (intel_private.clear_fake_agp) {
928 int end = intel_private.base.gtt_mappable_entries;
930 intel_private.clear_fake_agp =
false;
934 return i810_insert_dcache_entries(mem, pg_start, type);
939 if (pg_start + mem->
page_count > intel_private.base.gtt_total_entries)
942 if (type != mem->
type)
945 if (!intel_private.driver->check_flags(type))
951 if (intel_private.base.needs_dmar) {
976 for (i = first_entry; i < (first_entry +
num_entries); i++) {
977 intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
980 readl(intel_private.gtt+i-1);
984 static int intel_fake_agp_remove_entries(
struct agp_memory *mem,
985 off_t pg_start,
int type)
990 if (intel_private.base.do_idle_maps)
995 if (intel_private.base.needs_dmar) {
1004 static struct agp_memory *intel_fake_agp_alloc_by_type(
size_t pg_count,
1010 if (pg_count != intel_private.num_dcache_entries)
1018 new->page_count = pg_count;
1019 new->num_scratch_pages = 0;
1024 return alloc_agpphysmem_i8xx(pg_count, type);
1029 static int intel_alloc_chipset_flush_resource(
void)
/* Locate (or allocate) the i915-class Isochronous Flush Page.
 * Reads the 32-bit IFP address from bridge config space; bit 0 acts as
 * an "enabled/valid" flag.
 * NOTE(review): parts of this body were lost in extraction. */
1039 static void intel_i915_setup_chipset_flush(
void)
1044 pci_read_config_dword(intel_private.bridge_dev,
I915_IFPADDR, &temp);
/* Valid bit clear: BIOS did not set a flush page — allocate our own
 * resource and program its address (with bit 0 set) back. */
1045 if (!(temp & 0x1)) {
1046 intel_alloc_chipset_flush_resource();
1047 intel_private.resource_valid = 1;
1048 pci_write_config_dword(intel_private.bridge_dev,
I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
/* Valid bit set: adopt the BIOS-programmed page as a one-page resource. */
1052 intel_private.resource_valid = 1;
1053 intel_private.ifp_resource.start =
temp;
1054 intel_private.ifp_resource.end = temp +
PAGE_SIZE;
/* Failure path (elided context): mark the resource as not ours to free. */
1058 intel_private.resource_valid = 0;
/* i965/G33 variant of the flush-page setup: the IFP address is a 64-bit
 * quantity split across two 32-bit config dwords at I965_IFPADDR and
 * I965_IFPADDR + 4; bit 0 of the low dword is the valid flag.
 * NOTE(review): parts of this body were lost in extraction. */
1062 static void intel_i965_g33_setup_chipset_flush(
void)
1064 u32 temp_hi, temp_lo;
1067 pci_read_config_dword(intel_private.bridge_dev,
I965_IFPADDR + 4, &temp_hi);
1068 pci_read_config_dword(intel_private.bridge_dev,
I965_IFPADDR, &temp_lo);
/* No BIOS-provided page: allocate one and write both halves back. */
1070 if (!(temp_lo & 0x1)) {
1072 intel_alloc_chipset_flush_resource();
1074 intel_private.resource_valid = 1;
1075 pci_write_config_dword(intel_private.bridge_dev,
I965_IFPADDR + 4,
1077 pci_write_config_dword(intel_private.bridge_dev,
I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
/* BIOS-provided page: reassemble the 64-bit address and adopt it. */
1082 l64 = ((
u64)temp_hi << 32) | temp_lo;
1084 intel_private.resource_valid = 1;
1085 intel_private.ifp_resource.start = l64;
1086 intel_private.ifp_resource.end = l64 +
PAGE_SIZE;
/* Failure path (elided context): not our resource to release later. */
1090 intel_private.resource_valid = 0;
1094 static void intel_i9xx_setup_flush(
void)
1097 if (intel_private.ifp_resource.start)
1100 if (INTEL_GTT_GEN == 6)
1104 intel_private.ifp_resource.name =
"Intel Flush Page";
1108 if (
IS_G33 || INTEL_GTT_GEN >= 4) {
1109 intel_i965_g33_setup_chipset_flush();
1111 intel_i915_setup_chipset_flush();
1114 if (intel_private.ifp_resource.start)
1116 if (!intel_private.i9xx_flush_page)
1117 dev_err(&intel_private.pcidev->dev,
1118 "can't ioremap flush page - no chipset flushing\n");
/* Tear down i9xx flush state: unmap the flush page if it was ioremapped
 * and clear the IFP resource bookkeeping.
 * NOTE(review): the resource-release call guarded by resource_valid was
 * lost in extraction. */
1121 static void i9xx_cleanup(
void)
1123 if (intel_private.i9xx_flush_page)
1124 iounmap(intel_private.i9xx_flush_page)
1125 if (intel_private.resource_valid)
1127 intel_private.ifp_resource.start = 0;
1128 intel_private.resource_valid = 0;
/* Trigger a chipset flush by writing to the flush page — presumably any
 * value works and 1 is arbitrary; no-op if setup failed to map a page. */
1131 static void i9xx_chipset_flush(
void)
1133 if (intel_private.i9xx_flush_page)
1134 writel(1, intel_private.i9xx_flush_page);
/* Write one GTT PTE for i965-class (gen4) hardware.
 * NOTE(review): most of this function's body (pte_flags selection) was
 * lost in extraction. */
1137 static void i965_write_entry(
dma_addr_t addr,
/* Fold address bits above 31 into PTE bits 4..7 — presumably the
 * i965 extended-address PTE encoding; confirm against the PRM. */
1148 addr |= (addr >> 28) & 0xf0;
1149 writel(addr | pte_flags, intel_private.gtt + entry);
1152 static bool gen6_check_flags(
unsigned int flags)
/* Write one GTT PTE for Haswell.
 * NOTE(review): the pte_flags computation was lost in extraction. */
1157 static void haswell_write_entry(
dma_addr_t addr,
unsigned int entry,
/* Fold address bits above 31 into PTE bits 4..11 (wider high-address
 * field than i965's 0xf0 mask) — confirm against HW docs. */
1177 addr |= (addr >> 28) & 0xff0;
1178 writel(addr | pte_flags, intel_private.gtt + entry);
/* Write one GTT PTE for gen6 (Sandybridge-class) hardware.
 * NOTE(review): the mapping from type_mask/GFDT flag to pte_flags was
 * lost in extraction. */
1181 static void gen6_write_entry(
dma_addr_t addr,
unsigned int entry,
/* Strip the GFDT bit so only the cache-type portion of flags remains. */
1184 unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
/* Fold address bits above 31 into PTE bits 4..11, as on Haswell. */
1201 addr |= (addr >> 28) & 0xff0;
1202 writel(addr | pte_flags, intel_private.gtt + entry);
/* Write one GTT PTE for ValleyView.
 * NOTE(review): the pte_flags derivation from type_mask was lost in
 * extraction. */
1205 static void valleyview_write_entry(
dma_addr_t addr,
unsigned int entry,
/* Strip the GFDT bit; ValleyView has no GFDT, flags carry cache type. */
1208 unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
/* Fold address bits above 31 into PTE bits 4..11. */
1221 addr |= (addr >> 28) & 0xff0;
1222 writel(addr | pte_flags, intel_private.gtt + entry);
1227 static void gen6_cleanup(
void)
/* Decide whether GTT map/unmap must idle the GPU first — only relevant
 * when CONFIG_INTEL_IOMMU is enabled, and keyed off the GPU device id.
 * NOTE(review): the device-id comparison and the non-IOMMU return were
 * lost in extraction. */
1234 static inline int needs_idle_maps(
void)
1236 #ifdef CONFIG_INTEL_IOMMU
1237 const unsigned short gpu_devid = intel_private.pcidev->device;
1250 static int i9xx_setup(
void)
1255 pci_read_config_dword(intel_private.pcidev,
I915_MMADDR, ®_addr);
1257 reg_addr &= 0xfff80000;
1259 if (INTEL_GTT_GEN >= 7)
1262 intel_private.registers =
ioremap(reg_addr, size);
1263 if (!intel_private.registers)
1266 if (INTEL_GTT_GEN == 3) {
1269 pci_read_config_dword(intel_private.pcidev,
1271 intel_private.gtt_bus_addr = gtt_addr;
1275 switch (INTEL_GTT_GEN) {
1283 gtt_offset =
KB(512);
1286 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1289 if (needs_idle_maps())
1290 intel_private.base.do_idle_maps = 1;
1292 intel_i9xx_setup_flush();
1300 .aperture_sizes = intel_fake_agp_sizes,
1301 .num_aperture_sizes =
ARRAY_SIZE(intel_fake_agp_sizes),
1302 .configure = intel_fake_agp_configure,
1303 .fetch_size = intel_fake_agp_fetch_size,
1304 .cleanup = intel_gtt_cleanup,
1305 .agp_enable = intel_fake_agp_enable,
1307 .create_gatt_table = intel_fake_agp_create_gatt_table,
1308 .free_gatt_table = intel_fake_agp_free_gatt_table,
1309 .insert_memory = intel_fake_agp_insert_entries,
1310 .remove_memory = intel_fake_agp_remove_entries,
1311 .alloc_by_type = intel_fake_agp_alloc_by_type,
1312 .free_by_type = intel_i810_free_by_type,
1321 .has_pgtbl_enable = 1,
1322 .dma_mask_size = 32,
1323 .setup = i810_setup,
1324 .cleanup = i810_cleanup,
1325 .check_flags = i830_check_flags,
1326 .write_entry = i810_write_entry,
1330 .has_pgtbl_enable = 1,
1331 .setup = i830_setup,
1332 .cleanup = i830_cleanup,
1333 .write_entry = i830_write_entry,
1334 .dma_mask_size = 32,
1335 .check_flags = i830_check_flags,
1336 .chipset_flush = i830_chipset_flush,
1340 .has_pgtbl_enable = 1,
1341 .setup = i9xx_setup,
1342 .cleanup = i9xx_cleanup,
1344 .write_entry = i830_write_entry,
1345 .dma_mask_size = 32,
1346 .check_flags = i830_check_flags,
1347 .chipset_flush = i9xx_chipset_flush,
1352 .setup = i9xx_setup,
1353 .cleanup = i9xx_cleanup,
1354 .write_entry = i965_write_entry,
1355 .dma_mask_size = 36,
1356 .check_flags = i830_check_flags,
1357 .chipset_flush = i9xx_chipset_flush,
1361 .is_pineview = 1, .is_g33 = 1,
1362 .setup = i9xx_setup,
1363 .cleanup = i9xx_cleanup,
1364 .write_entry = i965_write_entry,
1365 .dma_mask_size = 36,
1366 .check_flags = i830_check_flags,
1367 .chipset_flush = i9xx_chipset_flush,
1371 .has_pgtbl_enable = 1,
1372 .setup = i9xx_setup,
1373 .cleanup = i9xx_cleanup,
1374 .write_entry = i965_write_entry,
1375 .dma_mask_size = 36,
1376 .check_flags = i830_check_flags,
1377 .chipset_flush = i9xx_chipset_flush,
1381 .setup = i9xx_setup,
1382 .cleanup = i9xx_cleanup,
1383 .write_entry = i965_write_entry,
1384 .dma_mask_size = 36,
1385 .check_flags = i830_check_flags,
1386 .chipset_flush = i9xx_chipset_flush,
1391 .setup = i9xx_setup,
1392 .cleanup = i9xx_cleanup,
1393 .write_entry = i965_write_entry,
1394 .dma_mask_size = 36,
1395 .check_flags = i830_check_flags,
1396 .chipset_flush = i9xx_chipset_flush,
1400 .setup = i9xx_setup,
1401 .cleanup = gen6_cleanup,
1402 .write_entry = gen6_write_entry,
1403 .dma_mask_size = 40,
1404 .check_flags = gen6_check_flags,
1405 .chipset_flush = i9xx_chipset_flush,
1409 .setup = i9xx_setup,
1410 .cleanup = gen6_cleanup,
1411 .write_entry = haswell_write_entry,
1412 .dma_mask_size = 40,
1413 .check_flags = gen6_check_flags,
1414 .chipset_flush = i9xx_chipset_flush,
1418 .setup = i9xx_setup,
1419 .cleanup = gen6_cleanup,
1420 .write_entry = valleyview_write_entry,
1421 .dma_mask_size = 40,
1422 .check_flags = gen6_check_flags,
1429 static const struct intel_gtt_driver_description {
1430 unsigned int gmch_chip_id;
1433 } intel_gtt_chipsets[] = {
1483 &pineview_gtt_driver },
1485 &pineview_gtt_driver },
1501 "HD Graphics", &ironlake_gtt_driver },
1503 "HD Graphics", &ironlake_gtt_driver },
1505 "Sandybridge", &sandybridge_gtt_driver },
1507 "Sandybridge", &sandybridge_gtt_driver },
1509 "Sandybridge", &sandybridge_gtt_driver },
1511 "Sandybridge", &sandybridge_gtt_driver },
1513 "Sandybridge", &sandybridge_gtt_driver },
1515 "Sandybridge", &sandybridge_gtt_driver },
1517 "Sandybridge", &sandybridge_gtt_driver },
1519 "Ivybridge", &sandybridge_gtt_driver },
1521 "Ivybridge", &sandybridge_gtt_driver },
1523 "Ivybridge", &sandybridge_gtt_driver },
1525 "Ivybridge", &sandybridge_gtt_driver },
1527 "Ivybridge", &sandybridge_gtt_driver },
1529 "Ivybridge", &sandybridge_gtt_driver },
1531 "ValleyView", &valleyview_gtt_driver },
1533 "Haswell", &haswell_gtt_driver },
1535 "Haswell", &haswell_gtt_driver },
1537 "Haswell", &haswell_gtt_driver },
1539 "Haswell", &haswell_gtt_driver },
1541 "Haswell", &haswell_gtt_driver },
1543 "Haswell", &haswell_gtt_driver },
1545 "Haswell", &haswell_gtt_driver },
1547 "Haswell", &haswell_gtt_driver },
1549 "Haswell", &haswell_gtt_driver },
1551 "Haswell", &haswell_gtt_driver },
1553 "Haswell", &haswell_gtt_driver },
1555 "Haswell", &haswell_gtt_driver },
1557 "Haswell", &haswell_gtt_driver },
1559 "Haswell", &haswell_gtt_driver },
1561 "Haswell", &haswell_gtt_driver },
1563 "Haswell", &haswell_gtt_driver },
1565 "Haswell", &haswell_gtt_driver },
1567 "Haswell", &haswell_gtt_driver },
1569 "Haswell", &haswell_gtt_driver },
1571 "Haswell", &haswell_gtt_driver },
1573 "Haswell", &haswell_gtt_driver },
1575 "Haswell", &haswell_gtt_driver },
1577 "Haswell", &haswell_gtt_driver },
1579 "Haswell", &haswell_gtt_driver },
1581 "Haswell", &haswell_gtt_driver },
1583 "Haswell", &haswell_gtt_driver },
1585 "Haswell", &haswell_gtt_driver },
1587 "Haswell", &haswell_gtt_driver },
1589 "Haswell", &haswell_gtt_driver },
1591 "Haswell", &haswell_gtt_driver },
1593 "Haswell", &haswell_gtt_driver },
1595 "Haswell", &haswell_gtt_driver },
1597 "Haswell", &haswell_gtt_driver },
1599 "Haswell", &haswell_gtt_driver },
1601 "Haswell", &haswell_gtt_driver },
1603 "Haswell", &haswell_gtt_driver },
1614 device, gmch_device);
1620 intel_private.pcidev = gmch_device;
1634 if (intel_private.driver) {
1635 intel_private.refcount++;
1639 for (i = 0; intel_gtt_chipsets[
i].name !=
NULL; i++) {
1642 intel_gtt_chipsets[i].gmch_chip_id) {
1644 intel_private.driver =
1645 intel_gtt_chipsets[
i].gtt_driver;
1649 }
else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1650 intel_private.driver =
1651 intel_gtt_chipsets[
i].gtt_driver;
1656 if (!intel_private.driver)
1659 intel_private.refcount++;
1662 bridge->
driver = &intel_fake_agp_driver;
1664 bridge->
dev = bridge_pdev;
1667 intel_private.bridge_dev =
pci_dev_get(bridge_pdev);
1669 dev_info(&bridge_pdev->
dev,
"Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1671 mask = intel_private.driver->dma_mask_size;
1672 if (pci_set_dma_mask(intel_private.pcidev,
DMA_BIT_MASK(mask)))
1673 dev_err(&intel_private.pcidev->dev,
1674 "set gfx device dma mask %d-bit failed!\n", mask);
1676 pci_set_consistent_dma_mask(intel_private.pcidev,
1679 if (intel_gtt_init() != 0) {
1691 return &intel_private.base;
1697 if (intel_private.driver->chipset_flush)
1698 intel_private.driver->chipset_flush();
1704 if (--intel_private.refcount)
1707 if (intel_private.pcidev)
1709 if (intel_private.bridge_dev)
1711 intel_private.driver =
NULL;