#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <drm/i915_drm.h>
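/*
 * Display/pipestat IRQ mask helpers: each compares the cached mask in
 * dev_priv first, so redundant enables and disables are filtered out
 * before any register write.
 */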
	if ((dev_priv->irq_mask & mask) != 0) {

	if ((dev_priv->irq_mask & mask) != mask) {

	if ((dev_priv->pipestat[pipe] & mask) != mask) {

	if ((dev_priv->pipestat[pipe] & mask) != 0) {
	unsigned long irqflags;

	ironlake_enable_display_irq(dev_priv, DE_GSE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
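/*
 * Pre-g4x frame counters are split across a high field and a low field
 * that shares a register with the pixel counter. The high word is read
 * before and after the low read, and the do/while loop retries until both
 * high reads agree, so a rollover cannot produce a torn value.
 */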
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)

	unsigned long high_frame;
	unsigned long low_frame;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));

	} while (high1 != high2);

	return (high1 << 8) | low;
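/*
 * Example: if both reads of the high field return 0x12 and the low field
 * read between them is 0x34, the composed counter is
 * (0x12 << 8) | 0x34 == 0x1234. From g4x onwards the hardware provides a
 * full 32-bit frame counter in one register, so the gm45 variant below
 * can read it directly.
 */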
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)

	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));

	*vpos = position & 0x1fff;

	*vpos = position / htotal;
	*hpos = position - (*vpos * htotal);

	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))

	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	ret |= DRM_SCANOUTPOS_INVBL;
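/*
 * i915_get_vblank_timestamp() rejects invalid or disabled crtcs, then
 * lets the DRM core turn the scanout position from above into a precise
 * vblank timestamp.
 */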
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)

		DRM_ERROR("Invalid crtc %d\n", pipe);

	crtc = intel_get_crtc_for_pipe(dev, pipe);

		DRM_ERROR("Invalid crtc %d\n", pipe);

		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");
	u32 busy_up, busy_down, max_avg, min_avg;

	new_delay = dev_priv->ips.cur_delay;

	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);
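/*
 * notify_ring(): called when a ring raises a user interrupt; traces the
 * completed seqno, wakes waiters on the ring, and re-arms the hangcheck
 * timer when hangcheck is enabled.
 */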
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	if (i915_enable_hangcheck) {
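/*
 * Gen6+ RPS work: the IRQ handler stashes the PM interrupt bits under
 * rps.lock and schedules this worker, which steps the requested frequency
 * delay up or down by one and only applies the result if it stays inside
 * the [rps.min_delay, rps.max_delay] window.
 */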
static void gen6_pm_rps_work(struct work_struct *work)

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->rps.lock);

		new_delay = dev_priv->rps.cur_delay + 1;

		new_delay = dev_priv->rps.cur_delay - 1;

	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
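/*
 * Ivybridge L3 parity: the hard IRQ merely masks the source and schedules
 * this worker, which reads row/bank/subbank from the error status
 * register, reports an L3_PARITY_ERROR=1 uevent to userspace, and frees
 * the environment strings it allocated.
 */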
static void ivybridge_parity_work(struct work_struct *work)

	u32 error_status, row, bank, subbank;
	char *parity_event[5];

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	parity_event[0] = "L3_PARITY_ERROR=1";

	parity_event[4] = NULL;

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
static void ivybridge_handle_parity_error(struct drm_device *dev)

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
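/*
 * GT interrupt fan-out for gen6/gen7: each user-interrupt bit wakes its
 * ring (render, video, blitter), GT errors are logged, and parity errors
 * are forwarded to the handler above.
 */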
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)

		notify_ring(dev, &dev_priv->ring[RCS]);

		notify_ring(dev, &dev_priv->ring[VCS]);

		notify_ring(dev, &dev_priv->ring[BCS]);

		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);

		ivybridge_handle_parity_error(dev);

	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
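/*
 * Valleyview interrupt loop: the handler below keeps iterating until the
 * GT, PM and display IIR registers all read back zero in the same pass.
 */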
	unsigned long irqflags;

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

			if (pipe_stats[pipe] & 0x8000ffff) {

				DRM_DEBUG_DRIVER("pipe %c underrun\n",
						 pipe_name(pipe));

		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
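/*
 * South display (PCH) interrupt decoding. The IBX and CPT/PPT variants
 * report the same classes of events (audio power, GMBUS, audio/CP, FDI,
 * transcoder faults) but with different bit layouts, hence the *_CPT
 * masks in the second handler.
 */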
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)

		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

		DRM_ERROR("PCH poison interrupt\n");

			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");

		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

		DRM_DEBUG_DRIVER("AUX channel interrupt\n");

		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
	u32 de_iir, gt_iir, de_ier, pm_iir;

	snb_gt_irq_handler(dev, dev_priv, gt_iir);

		intel_opregion_gse_intr(dev);

	for (i = 0; i < 3; i++) {

			cpt_irq_handler(dev, pch_iir);

	if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);
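/* Gen5 GT interrupts only carry render and video user-interrupt bits. */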
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)

		notify_ring(dev, &dev_priv->ring[RCS]);

		notify_ring(dev, &dev_priv->ring[VCS]);
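/*
 * Shared top-half for Ironlake and Sandybridge: GEN5 takes the ilk GT
 * path, GEN6 the snb path, and only GEN6 has PM interrupts to feed into
 * the RPS worker.
 */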
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)

	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		intel_opregion_gse_intr(dev);

		if (pch_iir & hotplug_mask)

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);
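/*
 * GPU error work: notify userspace via an ERROR=1 uevent; if a reset is
 * attempted, bracket it with RESET=1 and ERROR=0 uevents so tools can
 * follow the recovery.
 */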
static void i915_error_work_func(struct work_struct *work)

	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

		DRM_DEBUG_DRIVER("resetting chip\n");
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
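/*
 * Error-state capture, compiled only under CONFIG_DEBUG_FS: snapshot the
 * suspected batch buffer, the ring buffers, fence registers, ring state
 * and buffer-object lists into a drm_i915_error_state that can be read
 * back through debugfs.
 */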
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)

	struct drm_i915_error_object *dst;

	for (i = 0; i < count; i++) {

		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&

			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);

			io_mapping_unmap_atomic(s);

			page = i915_gem_object_get_page(src, i);

	dst->page_count = count;

		kfree(dst->pages[i]);
i915_error_object_free(struct drm_i915_error_object *obj)

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
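/*
 * capture_bo() flattens one live GEM object into an error-buffer record;
 * capture_active_bo()/capture_pinned_bo() walk a list and fill an array
 * of such records, returning how many were captured.
 */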
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)

	err->size = obj->base.size;
	err->name = obj->base.name;

	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;

	err->dirty = obj->dirty;

	err->ring = obj->ring ? obj->ring->id : -1;

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)

		capture_bo(err++, obj);

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)

		capture_bo(err++, obj);
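/* Fence-register snapshot: 16 registers on newer generations, 8 on older
 * ones, hence the two different loop bounds below. */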
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)

	for (i = 0; i < 16; i++)

	for (i = 0; i < 16; i++)

	for (i = 0; i < 8; i++)

	for (i = 0; i < 8; i++)
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)

		if (obj->ring != ring)

		return i915_error_object_create(dev_priv, obj);
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)

	if (ring->id == RCS)

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)

		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);
		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;

			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;

			erq->tail = request->tail;
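/*
 * The capture entry point runs from interrupt context, so everything
 * above allocates with GFP_ATOMIC and is serialized by error_lock; only
 * the first error since the last read of the debugfs file is retained.
 */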
	unsigned long flags;

	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");

	DRM_INFO("capturing error event; look for more information in "
		 "/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

				   &dev_priv->mm.active_list);

				   &dev_priv->mm.bound_list);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	unsigned long flags;

	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

#else
#define i915_capture_error_state(x)
#endif
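/*
 * EIR decoding: report the error cause (page table, memory refresh or
 * instruction error) with the relevant status registers, then clear EIR.
 * Bits that remain set are masked via EMR so they cannot re-trigger.
 */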
static void i915_report_and_clear_eir(struct drm_device *dev)

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

		pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);

			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);

		pr_err("page table error\n");
		pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);

		pr_err("memory refresh error:\n");

			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), pipe_stats[pipe]);

		pr_err("instruction error\n");

			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);

		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);

	i915_report_and_clear_eir(dev);
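/*
 * Pageflip stall detection: on vblank, compare the surface address the
 * display engine is scanning out with the address of the pending flip's
 * buffer; if they already match, the flip-done interrupt was presumably
 * missed and the flip is completed from here.
 */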
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)

	unsigned long flags;
	bool stall_detected;

	if (intel_crtc == NULL)

		spin_unlock_irqrestore(&dev->event_lock, flags);

					crtc->y * crtc->fb->pitches[0] +
					crtc->x * crtc->fb->bits_per_pixel/8);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
static int i915_enable_vblank(struct drm_device *dev, int pipe)

	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))

	if (dev_priv->info->gen == 3)

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)

	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))

	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)

	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))

	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)

	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static void i915_disable_vblank(struct drm_device *dev, int pipe)

	unsigned long irqflags;

	if (dev_priv->info->gen == 3)

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)

	unsigned long irqflags;

	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)

	unsigned long irqflags;

	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)

	unsigned long irqflags;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
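/*
 * Hangcheck: a periodic timer samples per-ring ACTHD and seqno state. A
 * ring whose last emitted seqno has passed is idle; an idle ring with
 * waiters means a missed interrupt, which is logged and the waiters
 * kicked. Rings showing no progress across samples eventually declare
 * the GPU hung.
 */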
	    i915_seqno_passed(ring->get_seqno(ring, false),
			      ring_last_seqno(ring))) {

		if (waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
				  ring->name);

	DRM_ERROR("Kicking stuck wait on %s\n",
		  ring->name);

static bool i915_hangcheck_hung(struct drm_device *dev)

		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");

			hung &= !kick_ring(ring);

	bool err = false, idle;

	if (!i915_enable_hangcheck)

	memset(acthd, 0, sizeof(acthd));

		idle &= i915_hangcheck_ring_idle(ring, &err);

			if (i915_hangcheck_hung(dev))

	i915_get_extra_instdone(dev, instdone);

		if (i915_hangcheck_hung(dev))
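/*
 * IRQ setup/teardown: preinstall hooks mask and clear all interrupt
 * sources, postinstall hooks construct the per-platform enable masks
 * (display_mask/enable_mask) and unmask only what the driver handles.
 */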
static void ironlake_irq_preinstall(struct drm_device *dev)

static void valleyview_irq_preinstall(struct drm_device *dev)

static int ironlake_irq_postinstall(struct drm_device *dev)

			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;

	dev_priv->irq_mask = ~display_mask;

		   GEN6_BSD_USER_INTERRUPT |

	ironlake_enable_pch_hotplug(dev);

		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);

static int ivybridge_irq_postinstall(struct drm_device *dev)

	dev_priv->irq_mask = ~display_mask;

	ironlake_enable_pch_hotplug(dev);

static int valleyview_irq_postinstall(struct drm_device *dev)

	dev_priv->irq_mask = (~enable_mask) |

	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);

	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
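/*
 * The config-space pokes above hand-craft the MSI address/data for VLV,
 * pointing it at the standard 0xfee00000 APIC address window.
 */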
static void valleyview_irq_uninstall(struct drm_device *dev)

static void i8xx_irq_preinstall(struct drm_device * dev)
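/*
 * Gen2 (i8xx) interrupt handler: loop on IIR until all bits other than
 * the plane flip-pending bits are acked; once a flip completes on a
 * plane its pending bit is dropped from flip_mask so the loop can
 * terminate.
 */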
	unsigned long irqflags;

	while (iir & ~flip_mask) {

			if (pipe_stats[pipe] & 0x8000ffff) {

				DRM_DEBUG_DRIVER("pipe %c underrun\n",
						 pipe_name(pipe));

		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&

			flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&

			flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
static void i8xx_irq_uninstall(struct drm_device * dev)

static void i915_irq_preinstall(struct drm_device * dev)

		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |

		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;

	intel_opregion_enable_asle(dev);
	unsigned long irqflags;

		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

			if (pipe_stats[pipe] & 0x8000ffff) {

				DRM_DEBUG_DRIVER("pipe %c underrun\n",
						 pipe_name(pipe));

				irq_received = true;

		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&

				if (iir & flip[plane]) {

			intel_opregion_asle_intr(dev);

	} while (iir & ~flip_mask);
static void i915_irq_uninstall(struct drm_device * dev)

static void i965_irq_preinstall(struct drm_device * dev)

		  I915_DISPLAY_PORT_INTERRUPT |

		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	intel_opregion_enable_asle(dev);
	unsigned long irqflags;

		bool blc_event = false;

		irq_received = iir != 0;

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

			if (pipe_stats[pipe] & 0x8000ffff) {

				DRM_DEBUG_DRIVER("pipe %c underrun\n",
						 pipe_name(pipe));

		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (iir & I915_DISPLAY_PORT_INTERRUPT) {

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

			notify_ring(dev, &dev_priv->ring[VCS]);

				i915_pageflip_stall_check(dev, pipe);

			intel_opregion_asle_intr(dev);
static void i965_irq_uninstall(struct drm_device * dev)
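/*
 * intel_irq_init(): choose the vblank counter width, then wire up the
 * irq and vblank hooks for the detected platform. Haswell shares the
 * Ivybridge handlers, which is why the same block of assignments appears
 * twice below.
 */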
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;

		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;

		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;

		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;

		dev->driver->irq_preinstall = i8xx_irq_preinstall;
		dev->driver->irq_postinstall = i8xx_irq_postinstall;
		dev->driver->irq_handler = i8xx_irq_handler;
		dev->driver->irq_uninstall = i8xx_irq_uninstall;

		dev->driver->irq_preinstall = i915_irq_preinstall;
		dev->driver->irq_postinstall = i915_irq_postinstall;
		dev->driver->irq_uninstall = i915_irq_uninstall;
		dev->driver->irq_handler = i915_irq_handler;

		dev->driver->irq_preinstall = i965_irq_preinstall;
		dev->driver->irq_postinstall = i965_irq_postinstall;
		dev->driver->irq_uninstall = i965_irq_uninstall;
		dev->driver->irq_handler = i965_irq_handler;

	dev->driver->enable_vblank = i915_enable_vblank;
	dev->driver->disable_vblank = i915_disable_vblank;