Linux Kernel 3.7.1
i915_gem_debug.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <[email protected]>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#if WATCH_LISTS
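/*
 * i915_verify_lists - sanity-check the GEM object lists
 *
 * Walks the active, flushing, GPU-write and inactive lists and reports
 * any object whose state is inconsistent with the list it sits on.
 * Stores the error count in 'warned' so a failing check is reported
 * only once; returns nonzero if any inconsistency was found.
 */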
int
i915_verify_lists(struct drm_device *dev)
{
        static int warned;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int err = 0;

        if (warned)
                return 0;

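        /* Cross-check each object's state against the list holding it. */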
        list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed render active %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
                        DRM_ERROR("invalid render active %p (a %d r %x)\n",
                                  obj,
                                  obj->active,
                                  obj->base.read_domains);
                        err++;
                } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
                        DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
                                  obj,
                                  obj->base.write_domain,
                                  !list_empty(&obj->gpu_write_list));
                        err++;
                }
        }

        list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed flushing %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
                           list_empty(&obj->gpu_write_list)) {
                        DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
                                  obj,
                                  obj->active,
                                  obj->base.write_domain,
                                  !list_empty(&obj->gpu_write_list));
                        err++;
                }
        }

        list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed gpu write %p\n", obj);
                        err++;
                        break;
                } else if (!obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
                        DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
                                  obj,
                                  obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }

        list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed inactive %p\n", obj);
                        err++;
                        break;
                } else if (obj->pin_count || obj->active ||
                           (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
                        DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
                                  obj,
                                  obj->pin_count, obj->active,
                                  obj->base.write_domain);
                        err++;
                }
        }

        return warned = err;
}
#endif /* WATCH_LISTS */

#if WATCH_COHERENCY
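/*
 * i915_gem_object_check_coherency - compare CPU and GTT views of an object
 *
 * Maps the object both through the GTT aperture and through its backing
 * pages, then compares the two views one 32-bit word at a time, logging
 * the first few mismatches. Useful for spotting clflush/chipset-flush
 * bugs in the domain-management code.
 */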
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int page;
        uint32_t *gtt_mapping;
        uint32_t *backing_map = NULL;
        int bad_count = 0;
        DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
                 __func__, obj, obj->gtt_offset, handle,
                 obj->base.size / 1024);

        gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
                              obj->base.size);
        if (gtt_mapping == NULL) {
                DRM_ERROR("failed to map GTT space\n");
                return;
        }

        for (page = 0; page < obj->base.size / PAGE_SIZE; page++) {
                int i;

                backing_map = kmap_atomic(obj->pages[page]);

                if (backing_map == NULL) {
                        DRM_ERROR("failed to map backing page\n");
                        goto out;
                }

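                /*
                 * Compare one 32-bit word at a time. The GTT mapping is
                 * indexed in dwords, so each 4KiB page spans 1024 entries.
                 */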
                for (i = 0; i < PAGE_SIZE / 4; i++) {
                        uint32_t cpuval = backing_map[i];
                        uint32_t gttval = readl(gtt_mapping +
                                                page * 1024 + i);

                        if (cpuval != gttval) {
                                DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
                                         "0x%08x vs 0x%08x\n",
                                         (int)(obj->gtt_offset +
                                               page * PAGE_SIZE + i * 4),
                                         cpuval, gttval);
                                if (bad_count++ >= 8) {
                                        DRM_INFO("...\n");
                                        goto out;
                                }
                        }
                }
                kunmap_atomic(backing_map);
                backing_map = NULL;
        }

 out:
        if (backing_map != NULL)
                kunmap_atomic(backing_map);
        iounmap(gtt_mapping);

        /* give syslog time to catch up */
        msleep(1);

        /* Directly flush the object, since we just loaded values with the CPU
         * from the backing pages and we don't want to disturb the cache
         * management that we're trying to observe.
         */
        i915_gem_clflush_object(obj);
}
#endif /* WATCH_COHERENCY */
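
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might exercise these helpers when the WATCH_LISTS / WATCH_COHERENCY
 * options in i915_drv.h are built as 1. The wrapper function below and
 * its name are hypothetical; WARN_ON() is the stock kernel macro.
 */
#if 0
static void i915_gem_debug_example(struct drm_device *dev,
                                   struct drm_i915_gem_object *obj,
                                   int handle)
{
        /* A nonzero return means at least one GEM list is inconsistent. */
        WARN_ON(i915_verify_lists(dev));

        /* Log the first few CPU vs GTT dword mismatches for this object. */
        i915_gem_object_check_coherency(obj, handle);
}
#endif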