#include <linux/slab.h>
#include <linux/export.h>
#include <drm/i915_drm.h>

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)
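/* Everything below is only built when debugfs support is enabled. */

/* Pretty-print a boolean for seq_file output. */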
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

	struct drm_info_node *node = (struct drm_info_node *) m->private;
#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define DEV_INFO_SEP ;
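/* Return a human-readable name for a GEM object's cache level. */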
static const char *cache_level_str(int type)

	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->dirty ? " dirty" : "",

	seq_printf(m, " (gtt offset: %08x, size: %08x)",
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	size_t total_obj_size, total_gtt_size;

		head = &dev_priv->mm.active_list;
		head = &dev_priv->mm.inactive_list;

	total_obj_size = total_gtt_size = count = 0;
		describe_obj(m, obj);
		total_obj_size += obj->base.size;

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
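/* Accumulate per-list statistics: total object size plus the size of the objects that are mappable and fenceable. */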
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;

		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
			purgeable_size += obj->base.size, ++purgeable_count;
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
			purgeable_size += obj->base.size;
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",

		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
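/* Describe each object bound into the GTT and print totals, using the same format as the list dump above. */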
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	size_t total_obj_size, total_gtt_size;

	total_obj_size = total_gtt_size = count = 0;
		describe_obj(m, obj);
		total_obj_size += obj->base.size;

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
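/* Report the pending page-flip state for every CRTC, inspecting unpin_work under dev->event_lock. */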
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

		work = crtc->unpin_work;
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
			seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
			seq_printf(m, "Stall check waiting for page flip ioctl, ");
		spin_unlock_irqrestore(&dev->event_lock, flags);
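/* List the outstanding requests on each ring together with their age in jiffies. */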
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

		if (list_empty(&ring->request_list))
			   (int) (jiffies - gem_request->emitted_jiffies));

static void i915_ring_seqno_info(struct seq_file *m,

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

		i915_ring_seqno_info(m, ring);

	struct drm_info_node *node = (struct drm_info_node *) m->private;
		seq_printf(m, "North Display Interrupt enable: %08x\n",
		seq_printf(m, "North Display Interrupt identity: %08x\n",
		seq_printf(m, "North Display Interrupt mask: %08x\n",
		seq_printf(m, "South Display Interrupt enable: %08x\n",
		seq_printf(m, "South Display Interrupt identity: %08x\n",
		seq_printf(m, "South Display Interrupt mask: %08x\n",
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   "Graphics Interrupt mask (%s): %08x\n",
		i915_ring_seqno_info(m, ring);
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			describe_obj(m, obj);

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
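/* Short names for the GPU rings, used when dumping error state. */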
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";

static const char *pin_flag(int pinned)

static const char *tiling_flag(int tiling)

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static void print_error_buffers(struct seq_file *m,
				struct drm_i915_error_buffer *err,

		seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
			   err->rseqno, err->wseqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   err->ring != -1 ? " " : "",
			   cache_level_str(err->cache_level));

			seq_printf(m, " (fence: %d)", err->fence_reg);

static void i915_ring_error_state(struct seq_file *m,
	seq_printf(m, "%s command stream:\n", ring_str(ring));
struct i915_error_state_file_priv {

	struct i915_error_state_file_priv *error_priv = m->private;

		   error->time.tv_usec);
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);

		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo_count);
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
				   dev_priv->ring[i].name,
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);

		if (error->ring[i].num_requests) {
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);

		if ((obj = error->ring[i].ringbuffer)) {
				   dev_priv->ring[i].name,
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					   obj->pages[page][elt]);

		intel_overlay_print_error_state(m, error->overlay);
		intel_display_print_error_state(m, dev, error->display);
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,

	struct i915_error_state_file_priv *error_priv = m->private;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);

	error_priv->dev = dev;

	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return single_open(file, i915_error_state, error_priv);

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = m->private;

	if (error_priv->error)

	.open = i915_error_state_open,
	.write = i915_error_state_write,
	.release = i915_error_state_release,
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n",
		   (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);

		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
			   (gt_perf_status & 0xff00) >> 8);
			   gt_perf_status & 0xff);
			   rp_state_limits & 0xff);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

	for (i = 0; i < 16; i++) {
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

	for (i = 1; i <= 32; i++) {
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	u32 rgvmodectl, rstdbyctl;

	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	u32 rpmodectl1, gt_core_status, rcctl1;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->gt_lock);
	spin_unlock_irq(&dev_priv->gt_lock);

	if (forcewake_count) {
		seq_printf(m, "RC information inaccurate because somebody "
			      "holds a forcewake reference\n");
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
	seq_printf(m, "RC6 residency since boot: %u\n",
	seq_printf(m, "RC6+ residency since boot: %u\n",
	seq_printf(m, "RC6++ residency since boot: %u\n",
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

		return gen6_drpc_info(m);
		return ironlake_drpc_info(m);

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

		seq_printf(m, "FBC unsupported on this chipset\n");
			seq_printf(m, "disabled per module param (default off)");

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	bool sr_enabled = false;

		   sr_enabled ? "enabled" : "disabled");
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
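/* Print the GPU-frequency to effective-CPU-frequency mapping, read one entry at a time via pcode. */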
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	int gpu_freq, ia_freq;

		seq_printf(m, "unsupported on this chipset\n");

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
			DRM_ERROR("pcode read of freq table timed out\n");
static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

	if (opregion->header) {

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

	ifbdev = dev_priv->fbdev;

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
		   fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);

		if (&fb->base == ifbdev->helper.fb)
		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
			   fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

		describe_obj(m, dev_priv->pwrctx);
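/* Expose the current forcewake reference count, sampled under gt_lock. */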
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->gt_lock);
	spin_unlock_irq(&dev_priv->gt_lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);
static const char *swizzle_string(unsigned swizzle)
{
		return "bit9/bit10";
		return "bit9/bit11";
		return "bit9/bit10/bit11";
		return "bit9/bit17";
		return "bit9/bit10/bit17";

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

	if (dev_priv->mm.aliasing_ppgtt) {

static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
i915_wedged_read(struct file *filp,

	if (len > sizeof(buf))

i915_wedged_write(struct file *filp,
		  const char __user *ubuf,

	if (cnt > sizeof(buf) - 1)

	DRM_INFO("Manually setting wedged to %d\n", val);

	.read = i915_wedged_read,
	.write = i915_wedged_write,

i915_ring_stop_read(struct file *filp,

	if (len > sizeof(buf))

i915_ring_stop_write(struct file *filp,
		     const char __user *ubuf,

	if (cnt > sizeof(buf) - 1)

	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);

	.read = i915_ring_stop_read,
	.write = i915_ring_stop_write,
i915_max_freq_read(struct file *filp,

	if (len > sizeof(buf))

i915_max_freq_write(struct file *filp,
		    const char __user *ubuf,

	if (cnt > sizeof(buf) - 1)

	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

	.read = i915_max_freq_read,
	.write = i915_max_freq_write,

i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,

	if (len > sizeof(buf))

i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,

	if (cnt > sizeof(buf) - 1)

	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);

	.read = i915_min_freq_read,
	.write = i915_min_freq_write,
i915_cache_sharing_read(struct file *filp,

	if (len > sizeof(buf))

i915_cache_sharing_write(struct file *filp,
			 const char __user *ubuf,

	if (cnt > sizeof(buf) - 1)

	if (val < 0 || val > 3)

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);

	.read = i915_cache_sharing_read,
	.write = i915_cache_sharing_write,

drm_add_fake_info_node(struct drm_minor *minor,

	struct drm_info_node *node;

	node->minor = minor;
	node->info_ent = (void *) key;

	list_add(&node->list, &minor->debugfs_list);
static int i915_forcewake_open(struct inode *inode, struct file *file)

static int i915_forcewake_release(struct inode *inode, struct file *file)

	.open = i915_forcewake_open,
	.release = i915_forcewake_release,

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)

				  &i915_forcewake_fops);
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,

		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
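/* Read-only debugfs entries, registered in bulk via drm_debugfs_create_files(). */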
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
	ret = i915_debugfs_create(minor->debugfs_root, minor,

	ret = i915_forcewake_create(minor->debugfs_root, minor);

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  &i915_max_freq_fops);

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  &i915_min_freq_fops);

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_cache_sharing",
				  &i915_cache_sharing_fops);

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  &i915_ring_stop_fops);

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  &i915_error_state_fops);

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
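/* Undo i915_debugfs_init(): remove the info list and every fake info node added for the custom file operations. */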
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,