/* Prefix every pr_*() log line from this module with the module name. */
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 #include <drm/i915_drm.h>
38 #include <linux/pci.h>
43 #include <linux/slab.h>
/* Legacy "LP" ring: the ring at index RCS in dev_priv's ring array
 * (presumably the render command streamer — confirm against the ring enum).
 * 'd' is a void-ish device-private pointer, hence the explicit cast. */
47 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
/* Reserve space for (n) dwords on the legacy ring; expects a local
 * 'dev_priv' to be in scope at every expansion site. */
49 #define BEGIN_LP_RING(n) \
50 intel_ring_begin(LP_RING(dev_priv), (n))
53 intel_ring_emit(LP_RING(dev_priv), x)
/* Commit previously emitted dwords by advancing the legacy ring's tail;
 * pairs with BEGIN_LP_RING()/OUT_RING(). Requires 'dev_priv' in scope. */
55 #define ADVANCE_LP_RING() \
56 intel_ring_advance(LP_RING(dev_priv))
64 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
65 if (LP_RING(dev->dev_private)->obj == NULL) \
66 LOCK_TEST_WITH_RETURN(dev, file); \
75 return intel_read_status_page(
LP_RING(dev_priv), reg);
/* Read one entry from the legacy hardware status page. */
78 #define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
/* Last breadcrumb (software sequence number) the GPU wrote to the status
 * page. Note: I915_BREADCRUMB_INDEX is #defined below its textual use
 * here — legal, since macro expansion happens at the call site. */
79 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
/* Dword index of the breadcrumb slot within the hardware status page. */
80 #define I915_BREADCRUMB_INDEX 0x21
87 if (dev->primary->master) {
88 master_priv = dev->primary->master->driver_priv;
110 static int i915_init_phys_hws(
struct drm_device *dev)
119 DRM_ERROR(
"Can not allocate hardware status page\n");
126 i915_write_hws_pga(dev);
128 DRM_DEBUG_DRIVER(
"Enabled hardware status page\n");
136 static void i915_free_hws(
struct drm_device *dev)
165 if (drm_core_check_feature(dev, DRIVER_MODESET))
174 if (!dev->primary->master)
177 master_priv = dev->primary->master->driver_priv;
182 static int i915_dma_cleanup(
struct drm_device * dev)
191 if (dev->irq_enabled)
213 if (master_priv->
sarea) {
217 DRM_DEBUG_DRIVER(
"sarea not found assuming DRI2 userspace\n");
222 i915_dma_cleanup(dev);
223 DRM_ERROR(
"Client tried to initialize ringbuffer in "
232 i915_dma_cleanup(dev);
237 dev_priv->
dri1.cpp = init->
cpp;
240 dev_priv->
dri1.current_page = 0;
246 dev_priv->
dri1.allow_batchbuffer = 1;
251 static int i915_dma_resume(
struct drm_device * dev)
256 DRM_DEBUG_DRIVER(
"%s\n", __func__);
259 DRM_ERROR(
"can not ioremap virtual address for"
266 DRM_ERROR(
"Can not find hardware status page\n");
269 DRM_DEBUG_DRIVER(
"hw status page @ %p\n",
274 i915_write_hws_pga(dev);
276 DRM_DEBUG_DRIVER(
"Enabled hardware status page\n");
282 struct drm_file *file_priv)
287 if (drm_core_check_feature(dev, DRIVER_MODESET))
290 switch (init->
func) {
292 retcode = i915_initialize(dev, init);
294 case I915_CLEANUP_DMA:
295 retcode = i915_dma_cleanup(dev);
297 case I915_RESUME_DMA:
298 retcode = i915_dma_resume(dev);
317 static int validate_cmd(
int cmd)
319 switch (((cmd >> 29) & 0x7)) {
321 switch ((cmd >> 23) & 0x3f) {
333 return (cmd & 0xff) + 2;
335 if (((cmd >> 24) & 0x1f) <= 0x18)
338 switch ((cmd >> 24) & 0x1f) {
342 switch ((cmd >> 16) & 0xff) {
344 return (cmd & 0x1f) + 2;
346 return (cmd & 0xf) + 2;
348 return (cmd & 0xffff) + 2;
352 return (cmd & 0xffff) + 1;
356 if ((cmd & (1 << 23)) == 0)
357 return (cmd & 0x1ffff) + 2;
358 else if (cmd & (1 << 17))
359 if ((cmd & 0xffff) == 0)
362 return (((cmd & 0xffff) + 1) / 2) + 1;
380 if ((dwords+1) *
sizeof(
int) >=
LP_RING(dev_priv)->
size - 8)
383 for (i = 0; i < dwords;) {
384 int sz = validate_cmd(buffer[i]);
385 if (sz == 0 || i + sz > dwords)
394 for (i = 0; i < dwords; i++)
412 if (box->
y2 <= box->
y1 || box->
x2 <= box->
x1 ||
413 box->
y2 <= 0 || box->
x2 <= 0) {
414 DRM_ERROR(
"Bad box %d,%d..%d,%d\n",
415 box->
x1, box->
y1, box->
x2, box->
y2);
426 OUT_RING(((box->
x2 - 1) & 0xffff) | ((box->
y2 - 1) << 16));
436 OUT_RING(((box->
x2 - 1) & 0xffff) | ((box->
y2 - 1) << 16));
449 static void i915_emit_breadcrumb(
struct drm_device *dev)
455 if (dev_priv->
counter > 0x7FFFFFFFUL)
469 static int i915_dispatch_cmdbuffer(
struct drm_device * dev,
478 DRM_ERROR(
"alignment");
484 count = nbox ? nbox : 1;
486 for (i = 0; i <
count; i++) {
494 ret = i915_emit_cmds(dev, cmdbuf, cmd->
sz / 4);
499 i915_emit_breadcrumb(dev);
503 static int i915_dispatch_batchbuffer(
struct drm_device * dev,
511 if ((batch->
start | batch->
used) & 0x7) {
512 DRM_ERROR(
"alignment");
518 count = nbox ? nbox : 1;
519 for (i = 0; i <
count; i++) {
561 i915_emit_breadcrumb(dev);
565 static int i915_dispatch_flip(
struct drm_device * dev)
569 dev->primary->master->driver_priv;
575 DRM_DEBUG_DRIVER(
"%s: page=%d pfCurrentPage=%d\n",
577 dev_priv->
dri1.current_page,
591 if (dev_priv->
dri1.current_page == 0) {
593 dev_priv->
dri1.current_page = 1;
596 dev_priv->
dri1.current_page = 0;
615 master_priv->
sarea_priv->pf_current_page = dev_priv->
dri1.current_page;
619 static int i915_quiescent(
struct drm_device *dev)
624 return intel_wait_ring_idle(ring);
627 static int i915_flush_ioctl(
struct drm_device *dev,
void *data,
628 struct drm_file *file_priv)
632 if (drm_core_check_feature(dev, DRIVER_MODESET))
638 ret = i915_quiescent(dev);
644 static int i915_batchbuffer(
struct drm_device *dev,
void *data,
645 struct drm_file *file_priv)
655 if (drm_core_check_feature(dev, DRIVER_MODESET))
658 if (!dev_priv->
dri1.allow_batchbuffer) {
659 DRM_ERROR(
"Batchbuffer ioctl disabled\n");
663 DRM_DEBUG_DRIVER(
"i915 batchbuffer, start %x used %d cliprects %d\n",
675 if (cliprects ==
NULL)
688 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
700 static int i915_cmdbuffer(
struct drm_device *dev,
void *data,
701 struct drm_file *file_priv)
712 DRM_DEBUG_DRIVER(
"i915 cmdbuffer, buf %p sz %d cliprects %d\n",
715 if (drm_core_check_feature(dev, DRIVER_MODESET))
724 if (batch_data ==
NULL)
730 goto fail_batch_free;
736 if (cliprects ==
NULL) {
738 goto fail_batch_free;
751 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
754 DRM_ERROR(
"i915_dispatch_cmdbuffer failed\n");
769 static int i915_emit_irq(
struct drm_device * dev)
776 DRM_DEBUG_DRIVER(
"\n");
779 if (dev_priv->
counter > 0x7FFFFFFFUL)
795 static int i915_wait_irq(
struct drm_device * dev,
int irq_nr)
802 DRM_DEBUG_DRIVER(
"irq_nr=%d breadcrumb=%d\n", irq_nr,
822 DRM_ERROR(
"EBUSY -- rec: %d emitted: %d\n",
831 static int i915_irq_emit(
struct drm_device *dev,
void *data,
832 struct drm_file *file_priv)
838 if (drm_core_check_feature(dev, DRIVER_MODESET))
841 if (!dev_priv || !
LP_RING(dev_priv)->virtual_start) {
842 DRM_ERROR(
"called with no initialization\n");
849 result = i915_emit_irq(dev);
853 DRM_ERROR(
"copy_to_user\n");
862 static int i915_irq_wait(
struct drm_device *dev,
void *data,
863 struct drm_file *file_priv)
868 if (drm_core_check_feature(dev, DRIVER_MODESET))
872 DRM_ERROR(
"called with no initialization\n");
876 return i915_wait_irq(dev, irqwait->
irq_seq);
879 static int i915_vblank_pipe_get(
struct drm_device *dev,
void *data,
880 struct drm_file *file_priv)
885 if (drm_core_check_feature(dev, DRIVER_MODESET))
889 DRM_ERROR(
"called with no initialization\n");
901 static int i915_vblank_swap(
struct drm_device *dev,
void *data,
902 struct drm_file *file_priv)
921 static int i915_flip_bufs(
struct drm_device *dev,
void *data,
922 struct drm_file *file_priv)
926 if (drm_core_check_feature(dev, DRIVER_MODESET))
929 DRM_DEBUG_DRIVER(
"%s\n", __func__);
934 ret = i915_dispatch_flip(dev);
940 static int i915_getparam(
struct drm_device *dev,
void *data,
941 struct drm_file *file_priv)
948 DRM_ERROR(
"called with no initialization\n");
952 switch (param->
param) {
954 value = dev->pdev->irq ? 1 : 0;
957 value = dev_priv->
dri1.allow_batchbuffer ? 1 : 0;
963 value = dev->pci_device;
972 value = dev_priv->
overlay ? 1 : 0;
982 value = intel_ring_initialized(&dev_priv->
ring[VCS]);
985 value = intel_ring_initialized(&dev_priv->
ring[BCS]);
1006 value = dev_priv->
mm.aliasing_ppgtt ? 1 : 0;
1018 DRM_DEBUG_DRIVER(
"Unknown parameter %d\n",
1024 DRM_ERROR(
"DRM_COPY_TO_USER failed\n");
1031 static int i915_setparam(
struct drm_device *dev,
void *data,
1032 struct drm_file *file_priv)
1038 DRM_ERROR(
"called with no initialization\n");
1042 switch (param->
param) {
1048 dev_priv->
dri1.allow_batchbuffer = param->
value ? 1 : 0;
1058 DRM_DEBUG_DRIVER(
"unknown parameter %d\n",
1066 static int i915_set_status_page(
struct drm_device *dev,
void *data,
1067 struct drm_file *file_priv)
1073 if (drm_core_check_feature(dev, DRIVER_MODESET))
1080 DRM_ERROR(
"called with no initialization\n");
1084 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1085 WARN(1,
"tried to set status page when mode setting active\n");
1089 DRM_DEBUG_DRIVER(
"set status page addr 0x%08x\n", (
u32)hws->
addr);
1093 dev_priv->
dri1.gfx_hws_cpu_addr =
1095 if (dev_priv->
dri1.gfx_hws_cpu_addr ==
NULL) {
1096 i915_dma_cleanup(dev);
1098 DRM_ERROR(
"can not ioremap virtual address for"
1099 " G33 hw status page\n");
1106 DRM_DEBUG_DRIVER(
"load hws HWS_PGA with gfx mem 0x%x\n",
1108 DRM_DEBUG_DRIVER(
"load hws at %p\n",
1113 static int i915_get_bridge_dev(
struct drm_device *dev)
1119 DRM_ERROR(
"bridge device not found\n");
/* PCI config-space offset of the MCHBAR (memory controller hub BAR)
 * register on the bridge device — offset differs by generation. */
1125 #define MCHBAR_I915 0x44
1126 #define MCHBAR_I965 0x48
/* Size of the MCHBAR MMIO window to claim/reserve. */
1127 #define MCHBAR_SIZE (4*4096)
/* DEVEN config register; bit 28 enables MCHBAR decode. */
1129 #define DEVEN_REG 0x54
1130 #define DEVEN_MCHBAR_EN (1 << 28)
1134 intel_alloc_mchbar_resource(
struct drm_device *dev)
1138 u32 temp_lo, temp_hi = 0;
1143 pci_read_config_dword(dev_priv->
bridge_dev, reg + 4, &temp_hi);
1144 pci_read_config_dword(dev_priv->
bridge_dev, reg, &temp_lo);
1145 mchbar_addr = ((
u64)temp_hi << 32) | temp_lo;
1155 dev_priv->
mch_res.name =
"i915 MCHBAR";
1164 DRM_DEBUG_DRIVER(
"failed bus alloc: %d\n", ret);
1170 pci_write_config_dword(dev_priv->
bridge_dev, reg + 4,
1173 pci_write_config_dword(dev_priv->
bridge_dev, reg,
1193 pci_read_config_dword(dev_priv->
bridge_dev, mchbar_reg, &temp);
1201 if (intel_alloc_mchbar_resource(dev))
1211 pci_read_config_dword(dev_priv->
bridge_dev, mchbar_reg, &temp);
1212 pci_write_config_dword(dev_priv->
bridge_dev, mchbar_reg, temp | 1);
1217 intel_teardown_mchbar(
struct drm_device *dev)
1229 pci_read_config_dword(dev_priv->
bridge_dev, mchbar_reg, &temp);
1231 pci_write_config_dword(dev_priv->
bridge_dev, mchbar_reg, temp);
1240 static unsigned int i915_vga_set_decode(
void *
cookie,
bool state)
1254 struct drm_device *dev = pci_get_drvdata(pdev);
1258 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1262 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1264 pr_err(
"switched off\n");
1265 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1267 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1271 static bool i915_switcheroo_can_switch(
struct pci_dev *pdev)
1273 struct drm_device *dev = pci_get_drvdata(pdev);
1276 spin_lock(&dev->count_lock);
1277 can_switch = (dev->open_count == 0);
1278 spin_unlock(&dev->count_lock);
1283 .set_gpu_state = i915_switcheroo_set_state,
1285 .can_switch = i915_switcheroo_can_switch,
1288 static int i915_load_modeset_init(
struct drm_device *dev)
1295 DRM_INFO(
"failed to find VBIOS tables\n");
1305 if (ret && ret != -
ENODEV)
1312 goto cleanup_vga_client;
1319 goto cleanup_vga_switcheroo;
1325 goto cleanup_gem_stolen;
1335 dev->vblank_disable_allowed = 1;
1344 dev_priv->
mm.suspended = 0;
1357 cleanup_vga_switcheroo:
1369 master_priv = kzalloc(
sizeof(*master_priv),
GFP_KERNEL);
1373 master->driver_priv = master_priv;
1386 master->driver_priv =
NULL;
1393 dev_priv->
mm.gtt_mtrr = -1;
1395 #if defined(CONFIG_X86_PAT)
1406 if (dev_priv->
mm.gtt_mtrr < 0) {
1407 DRM_INFO(
"MTRR allocation failed. Graphics "
1408 "performance may suffer.\n");
1414 struct apertures_struct *ap;
1418 ap = alloc_apertures(1);
1422 ap->ranges[0].base = dev_priv->
mm.gtt->gma_bus_addr;
1423 ap->ranges[0].size =
1437 #define DEV_INFO_FLAG(name) info->name ? #name "," : ""
1438 #define DEV_INFO_SEP ,
1439 DRM_DEBUG_DRIVER(
"i915 device info: gen=%i, pciid=0x%04x flags="
1440 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
1442 dev_priv->
dev->pdev->device,
1444 #undef DEV_INFO_FLAG
1463 int ret = 0, mmio_bar, mmio_size;
1469 if (info->
gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1480 if (dev_priv ==
NULL)
1483 dev->dev_private = (
void *)dev_priv;
1487 i915_dump_device_info(dev_priv);
1489 if (i915_get_bridge_dev(dev)) {
1496 DRM_ERROR(
"failed to set up gmch\n");
1502 if (!dev_priv->
mm.gtt) {
1503 DRM_ERROR(
"Failed to initialize GTT\n");
1508 if (drm_core_check_feature(dev, DRIVER_MODESET))
1509 i915_kick_out_firmware_fb(dev_priv);
1528 mmio_bar =
IS_GEN2(dev) ? 1 : 0;
1537 mmio_size = 512*1024;
1539 mmio_size = 2*1024*1024;
1541 dev_priv->
regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1542 if (!dev_priv->
regs) {
1543 DRM_ERROR(
"failed to map registers\n");
1548 aperture_size = dev_priv->
mm.gtt->gtt_mappable_entries <<
PAGE_SHIFT;
1549 dev_priv->
mm.gtt_base_addr = dev_priv->
mm.gtt->gma_bus_addr;
1551 dev_priv->
mm.gtt_mapping =
1552 io_mapping_create_wc(dev_priv->
mm.gtt_base_addr,
1554 if (dev_priv->
mm.gtt_mapping ==
NULL) {
1559 i915_mtrr_setup(dev_priv, dev_priv->
mm.gtt_base_addr,
1576 if (dev_priv->
wq ==
NULL) {
1577 DRM_ERROR(
"Failed to create our workqueue.\n");
1589 intel_setup_mchbar(dev);
1600 ret = i915_init_phys_hws(dev);
1602 goto out_gem_unload;
1617 pci_enable_msi(dev->pdev);
1633 goto out_gem_unload;
1636 dev_priv->
mm.suspended = 1;
1638 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1639 ret = i915_load_modeset_init(dev);
1641 DRM_ERROR(
"failed to init modeset\n");
1642 goto out_gem_unload;
1649 intel_opregion_init(dev);
1653 (
unsigned long) dev);
1661 if (dev_priv->
mm.inactive_shrinker.shrink)
1664 if (dev->pdev->msi_enabled)
1668 intel_teardown_mchbar(dev);
1671 if (dev_priv->
mm.gtt_mtrr >= 0) {
1673 dev_priv->
mm.gtt_base_addr,
1675 dev_priv->
mm.gtt_mtrr = -1;
1677 io_mapping_free(dev_priv->
mm.gtt_mapping);
1698 if (dev_priv->
mm.inactive_shrinker.shrink)
1704 DRM_ERROR(
"failed to idle hardware: %d\n", ret);
1711 io_mapping_free(dev_priv->
mm.gtt_mapping);
1712 if (dev_priv->
mm.gtt_mtrr >= 0) {
1714 dev_priv->
mm.gtt_base_addr,
1715 dev_priv->
mm.gtt->gtt_mappable_entries *
PAGE_SIZE);
1716 dev_priv->
mm.gtt_mtrr = -1;
1721 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1744 if (dev->pdev->msi_enabled)
1747 intel_opregion_fini(dev);
1749 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1772 intel_teardown_mchbar(dev);
1777 kfree(dev->dev_private);
1786 DRM_DEBUG_DRIVER(
"\n");
1791 file->driver_priv = file_priv;
1794 INIT_LIST_HEAD(&file_priv->
mm.request_list);
1823 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1831 i915_dma_cleanup(dev);
1848 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1849 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1850 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
1851 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
1852 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
1853 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1854 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
1855 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1856 DRM_IOCTL_DEF_DRV(I915_ALLOC,
drm_noop, DRM_AUTH),
1857 DRM_IOCTL_DEF_DRV(I915_FREE,
drm_noop, DRM_AUTH),
1858 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP,
drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1859 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
1860 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,
drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1861 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,
drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1862 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
1863 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1864 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1865 DRM_IOCTL_DEF_DRV(I915_GEM_INIT,
i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1868 DRM_IOCTL_DEF_DRV(I915_GEM_PIN,
i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1874 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT,
i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1875 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT,
i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1888 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE,
intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1889 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS,
intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),