Linux Kernel 3.7.1 — ttm_object.c (drivers/gpu/drm/ttm)
Doxygen-generated documentation page for the TTM base-object and
reference-object management code. Note: the generator stripped the
original comment blocks and a handful of code lines (visible as gaps
in the embedded line numbering); consult the original source file
before compiling.
1 /**************************************************************************
2  *
3  * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
52 #define pr_fmt(fmt) "[TTM] " fmt
53 
54 #include <drm/ttm/ttm_object.h>
55 #include <drm/ttm/ttm_module.h>
56 #include <linux/list.h>
57 #include <linux/spinlock.h>
58 #include <linux/slab.h>
59 #include <linux/module.h>
60 #include <linux/atomic.h>
61 
67  struct kref refcount;
68 };
69 
87 };
88 
112  struct list_head head;
113  struct kref kref;
117 };
118 
119 static inline struct ttm_object_file *
120 ttm_object_file_ref(struct ttm_object_file *tfile)
121 {
122  kref_get(&tfile->refcount);
123  return tfile;
124 }
125 
126 static void ttm_object_file_destroy(struct kref *kref)
127 {
128  struct ttm_object_file *tfile =
129  container_of(kref, struct ttm_object_file, refcount);
130 
131  kfree(tfile);
132 }
133 
134 
135 static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
136 {
137  struct ttm_object_file *tfile = *p_tfile;
138 
139  *p_tfile = NULL;
140  kref_put(&tfile->refcount, ttm_object_file_destroy);
141 }
142 
143 
/*
 * ttm_base_object_init - initialize a base object and register it with
 * the device hash table and the owning file's TTM_REF_USAGE list.
 *
 * NOTE(review): the documentation scrape dropped the signature's first
 * line ("int ttm_base_object_init(struct ttm_object_file *tfile,"),
 * the "enum ttm_object_type object_type," parameter line, and the line
 * starting the hash-insert call that assigns 'ret' (only its trailing
 * argument lines survive below). Restore these from the original
 * source before compiling.
 *
 * On success the only remaining reference is the TTM_REF_USAGE ref
 * held by @tfile — the init reference is dropped via
 * ttm_base_object_unref() before returning 0.
 */
			 struct ttm_base_object *base,
			 bool shareable,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);	/* owner ref, dropped in ttm_release_base() */
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	write_lock(&tdev->object_lock);
	kref_init(&base->refcount);
	/* NOTE(review): hash-insert call line missing from the scrape;
	 * these are its trailing arguments. */
		    &base->hash,
		    (unsigned long)base, 31, 0, 0);
	write_unlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	/* Give the owning file its usage reference. */
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

	/* Drop the kref_init() reference; the usage ref keeps it alive. */
	ttm_base_object_unref(&base);

	return 0;
out_err1:
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
out_err0:
	return ret;
}
182 
/*
 * ttm_release_base - kref release callback for a ttm_base_object.
 *
 * Called by kref_put() with tdev->object_lock held for writing (see
 * ttm_base_object_unref()). Removes the object from the device hash,
 * then temporarily DROPS object_lock around the driver's
 * refcount_release hook — which may sleep and frees the object — and
 * reacquires it before returning so the caller's unlock stays
 * balanced. 'base' must not be touched after refcount_release().
 */
static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
	    container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	write_unlock(&tdev->object_lock);
	if (base->refcount_release) {
		/* Drop the owner-file ref taken in ttm_base_object_init(). */
		ttm_object_file_unref(&base->tfile);
		base->refcount_release(&base);
	}
	write_lock(&tdev->object_lock);
}
197 
/*
 * ttm_base_object_unref - drop a caller's reference to a base object.
 *
 * NOTE(review): the signature line ("void ttm_base_object_unref(struct
 * ttm_base_object **p_base)") was dropped by the documentation scrape;
 * restore it from the original source.
 *
 * Clears *p_base before dropping the reference so the caller's pointer
 * can never be used after the object has potentially been freed.
 */
{
	struct ttm_base_object *base = *p_base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	*p_base = NULL;

	/*
	 * Need to take the lock here to avoid racing with
	 * users trying to look up the object.
	 */

	write_lock(&tdev->object_lock);
	kref_put(&base->refcount, ttm_release_base);
	write_unlock(&tdev->object_lock);
}
215 
/*
 * ttm_base_object_lookup - look up a base object by hash key and take
 * a reference on it.
 *
 * NOTE(review): the signature's first line ("struct ttm_base_object *
 * ttm_base_object_lookup(struct ttm_object_file *tfile,") was dropped
 * by the documentation scrape.
 *
 * Returns the referenced object, or NULL if the key is unknown or the
 * object belongs to another file and is not shareable. The caller must
 * balance a non-NULL return with ttm_base_object_unref().
 */
			 uint32_t key)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct drm_hash_item *hash;
	int ret;

	read_lock(&tdev->object_lock);
	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		/* Reference taken under the lock so the object cannot be
		 * released between lookup and return. */
		kref_get(&base->refcount);
	}
	read_unlock(&tdev->object_lock);

	if (unlikely(ret != 0))
		return NULL;

	/* Enforce per-file visibility of non-shareable objects; drop the
	 * reference just taken on rejection. */
	if (tfile != base->tfile && !base->shareable) {
		pr_err("Attempted access of non-shareable object\n");
		ttm_base_object_unref(&base);
		return NULL;
	}

	return base;
}
245 
/*
 * ttm_ref_object_add - add (or reuse) a per-file reference of type
 * @ref_type on @base.
 *
 * NOTE(review): the signature's first line ("int ttm_ref_object_add(
 * struct ttm_object_file *tfile," was dropped by the documentation
 * scrape.
 *
 * Optimistic retry loop: first probe the ref hash under the read lock
 * and bump the existing ref's kref if found; otherwise allocate a new
 * ttm_ref_object outside any lock and try to insert it under the write
 * lock. If another thread inserted the same key in the meantime,
 * drm_ht_insert_item() fails with -EINVAL and the loop retries from
 * the top after freeing the unused allocation.
 *
 * *existed (if non-NULL) reports whether an existing ref was reused.
 */
			 struct ttm_base_object *base,
			 enum ttm_ref_type ref_type, bool *existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	int ret = -EINVAL;	/* -EINVAL doubles as the "retry" sentinel */

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		read_lock(&tfile->lock);
		ret = drm_ht_find_item(ht, base->hash.key, &hash);

		if (ret == 0) {
			/* Fast path: ref already exists, just bump it. */
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			kref_get(&ref->kref);
			read_unlock(&tfile->lock);
			break;
		}

		read_unlock(&tfile->lock);
		/* Account the allocation with the global memory accountant
		 * before kmalloc; undo on any failure below. */
		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   false, false);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->hash.key;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		write_lock(&tfile->lock);
		ret = drm_ht_insert_item(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			/* The new ref holds a reference on the base object,
			 * dropped again in ttm_ref_object_release(). */
			kref_get(&base->refcount);
			write_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		write_unlock(&tfile->lock);
		/* Insert can only fail because someone else won the race to
		 * insert the same key — free our copy and retry. */
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}
309 
/*
 * ttm_ref_object_release - kref release callback for a ttm_ref_object.
 *
 * Called by kref_put() with tfile->lock held for writing. Unhashes and
 * unlists the ref, then DROPS tfile->lock around the driver's
 * ref_obj_release hook and the base-object unref (both may sleep), and
 * reacquires it before returning so the caller's unlock stays
 * balanced.
 */
static void ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
	    container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item(ht, &ref->hash);
	list_del(&ref->head);
	write_unlock(&tfile->lock);

	/* TTM_REF_USAGE refs carry no driver callback; all others do,
	 * when the object registered one. */
	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	/* Return the accounted memory taken in ttm_ref_object_add(). */
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree(ref);
	write_lock(&tfile->lock);
}
332 
/*
 * ttm_ref_object_base_unref - drop one per-file reference of type
 * @ref_type on the object identified by @key.
 *
 * NOTE(review): the signature's first line ("int
 * ttm_ref_object_base_unref(struct ttm_object_file *tfile," was
 * dropped by the documentation scrape.
 *
 * Returns 0 on success, -EINVAL if no such ref exists for this file.
 */
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	write_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		write_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	/* ttm_ref_object_release() drops and retakes tfile->lock. */
	kref_put(&ref->kref, ttm_ref_object_release);
	write_unlock(&tfile->lock);
	return 0;
}
353 
/*
 * ttm_object_file_release - tear down a ttm_object_file, releasing all
 * refs it still holds, and drop the caller's reference to it.
 *
 * NOTE(review): the signature line ("void ttm_object_file_release(
 * struct ttm_object_file **p_tfile)") was dropped by the documentation
 * scrape.
 */
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	write_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		/* Called directly (not via kref_put): forcibly releases the
		 * ref regardless of its remaining kref count. Drops and
		 * retakes tfile->lock internally. */
		ttm_ref_object_release(&ref->kref);
	}

	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	write_unlock(&tfile->lock);
	ttm_object_file_unref(&tfile);
}
382 
/*
 * ttm_object_file_init - allocate and initialize a per-file TTM object
 * state structure.
 *
 * NOTE(review): the signature's first line ("struct ttm_object_file *
 * ttm_object_file_init(struct ttm_object_device *tdev," was dropped by
 * the documentation scrape.
 *
 * @hash_order: log2 of the size of each per-ref-type hash table.
 *
 * Returns the new tfile (refcount 1) or NULL on allocation failure;
 * partially created hash tables are unwound on error.
 */
				 unsigned int hash_order)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	unsigned int i;
	unsigned int j = 0;
	int ret;

	if (unlikely(tfile == NULL))
		return NULL;

	rwlock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	/* One hash table per reference type. */
	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			j = i;	/* remember how many were created */
			goto out_err;
		}
	}

	return tfile;
out_err:
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	kfree(tfile);

	return NULL;
}
417 
/*
 * ttm_object_device_init - allocate and initialize the per-device TTM
 * object state.
 *
 * NOTE(review): the signature's first line(s) ("struct
 * ttm_object_device *ttm_object_device_init(struct ttm_mem_global"
 * ...) were dropped by the documentation scrape.
 *
 * @mem_glob: global memory accounting object used when refs are
 * allocated.
 * @hash_order: log2 of the size of the device-wide object hash table.
 *
 * Returns the new device object or NULL on failure.
 */
							*mem_glob,
						 unsigned int hash_order)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	rwlock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);

	if (likely(ret == 0))
		return tdev;

	kfree(tdev);
	return NULL;
}
440 
/*
 * ttm_object_device_release - tear down the per-device TTM object
 * state and clear the caller's pointer.
 *
 * NOTE(review): the signature line ("void ttm_object_device_release(
 * struct ttm_object_device **p_tdev)") was dropped by the
 * documentation scrape.
 *
 * Assumes all base objects have already been released; the object hash
 * is removed under the write lock and the device struct freed.
 */
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	write_lock(&tdev->object_lock);
	drm_ht_remove(&tdev->object_hash);
	write_unlock(&tdev->object_lock);

	kfree(tdev);
}