Linux Kernel 3.7.1
i915_gem_evict.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <[email protected]>
 *    Chris Wilson <[email protected]>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"

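/* Helper for the eviction scan below: pinned objects can never be
 * evicted; everything else is added both to the drm_mm scanner and to
 * the unwind list so the scan can be rolled back later.
 */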
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
	if (obj->pin_count)
		return false;

	list_add(&obj->exec_list, unwind);
	return drm_mm_scan_add_block(obj->gtt_space);
}

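/*
 * i915_gem_evict_something - try to free up @min_size bytes of GTT space
 * @dev: drm device
 * @min_size: minimum size of the hole, in bytes
 * @alignment: required alignment of the hole
 * @cache_level: cache level the new binding must be compatible with
 * @mappable: restrict the search to the CPU-mappable part of the GTT
 * @nonblocking: only consider already-idle (inactive) objects
 *
 * Scans bound objects in LRU order and unbinds just enough of them to
 * open up one contiguous hole. Returns 0 on success, -ENOSPC if no
 * suitable set of objects could be found.
 */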
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
	if (mappable)
		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
					    min_size, alignment, cache_level,
					    0, dev_priv->mm.gtt_mappable_end);
	else
		drm_mm_init_scan(&dev_priv->mm.gtt_space,
				 min_size, alignment, cache_level);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);

		/* Roll back the scanner state before dropping the object. */
		ret = drm_mm_scan_remove_block(obj->gtt_space);
		BUG_ON(ret);

		list_del_init(&obj->exec_list);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while scanning,
	 * therefore store the objects to be evicted on a temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (drm_mm_scan_remove_block(obj->gtt_space)) {
			/* This block is part of the winning hole: evict it. */
			list_move(&obj->exec_list, &eviction_list);
			drm_gem_object_reference(&obj->base);
			continue;
		}
		list_del_init(&obj->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj = list_first_entry(&eviction_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(obj);

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	return ret;
}

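/*
 * i915_gem_evict_everything - unbind every unpinned object from the GTT
 * @dev: drm device
 *
 * Waits for the GPU to go idle, retires all outstanding requests, and
 * then unbinds everything left on the inactive list. Returns -ENOSPC if
 * there was nothing bound to evict, 0 otherwise.
 */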
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	bool lists_empty;
	int ret;

	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* i915_gpu_idle() will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list, mm_list)
		if (obj->pin_count == 0)
			WARN_ON(i915_gem_object_unbind(obj));

	return 0;
}
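
For context, a minimal sketch of how this entry point is typically used (not part of the file): in this kernel the real caller is the GTT bind path in i915_gem.c, which retries its allocation after a successful eviction pass. The helper try_alloc_gtt_space() below is hypothetical, standing in for the drm_mm hole search that path performs.

/* Hypothetical caller sketch -- try_alloc_gtt_space() is a stand-in
 * for the drm_mm allocation done by the real bind path. */
static int bind_retrying_with_eviction(struct drm_device *dev,
				       struct drm_i915_gem_object *obj,
				       u32 size, unsigned alignment,
				       bool mappable, bool nonblocking)
{
	int ret;

	for (;;) {
		if (try_alloc_gtt_space(obj, size, alignment, mappable))
			return 0;	/* found a hole and bound the object */

		/* No hole big enough: evict in LRU order, then retry. */
		ret = i915_gem_evict_something(dev, size, alignment,
					       obj->cache_level,
					       mappable, nonblocking);
		if (ret)
			return ret;	/* -ENOSPC, or an error while idling */
	}
}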