Linux Kernel  3.7.1
vmwgfx_resource.c
1 /**************************************************************************
2  *
3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include "vmwgfx_drv.h"
29 #include <drm/vmwgfx_drm.h>
30 #include <drm/ttm/ttm_object.h>
31 #include <drm/ttm/ttm_placement.h>
32 #include <drm/drmP.h>
33 
34 struct vmw_user_context {
35  struct ttm_base_object base;
36  struct vmw_resource res;
37 };
38 
39 struct vmw_user_surface {
40  struct ttm_base_object base;
41  struct vmw_surface srf;
42  uint32_t size;
43 };
44 
45 struct vmw_user_dma_buffer {
46  struct ttm_base_object base;
47  struct vmw_dma_buffer dma;
48 };
49 
50 struct vmw_bo_user_rep {
51  uint32_t handle;
52  uint64_t map_handle;
53 };
54 
55 struct vmw_stream {
56  struct vmw_resource res;
57  uint32_t stream_id;
58 };
59 
60 struct vmw_user_stream {
61  struct ttm_base_object base;
62  struct vmw_stream stream;
63 };
64 
65 struct vmw_surface_offset {
66  uint32_t face;
67  uint32_t mip;
68  uint32_t bo_offset;
69 };
70 
71 
72 static uint64_t vmw_user_context_size;
73 static uint64_t vmw_user_surface_size;
74 static uint64_t vmw_user_stream_size;
75 
76 static inline struct vmw_dma_buffer *
77 vmw_dma_buffer(struct ttm_buffer_object *bo)
78 {
79  return container_of(bo, struct vmw_dma_buffer, base);
80 }
81 
82 static inline struct vmw_user_dma_buffer *
83 vmw_user_dma_buffer(struct ttm_buffer_object *bo)
84 {
85  struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
86  return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
87 }
88 
89 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
90 {
91  kref_get(&res->kref);
92  return res;
93 }
94 
95 
103 static void vmw_resource_release_id(struct vmw_resource *res)
104 {
105  struct vmw_private *dev_priv = res->dev_priv;
106 
107  write_lock(&dev_priv->resource_lock);
108  if (res->id != -1)
109  idr_remove(res->idr, res->id);
110  res->id = -1;
111  write_unlock(&dev_priv->resource_lock);
112 }
113 
114 static void vmw_resource_release(struct kref *kref)
115 {
116  struct vmw_resource *res =
117  container_of(kref, struct vmw_resource, kref);
118  struct vmw_private *dev_priv = res->dev_priv;
119  int id = res->id;
120  struct idr *idr = res->idr;
121 
122  res->avail = false;
123  if (res->remove_from_lists != NULL)
124  res->remove_from_lists(res);
125  write_unlock(&dev_priv->resource_lock);
126 
127  if (likely(res->hw_destroy != NULL))
128  res->hw_destroy(res);
129 
130  if (res->res_free != NULL)
131  res->res_free(res);
132  else
133  kfree(res);
134 
135  write_lock(&dev_priv->resource_lock);
136 
137  if (id != -1)
138  idr_remove(idr, id);
139 }
140 
141 void vmw_resource_unreference(struct vmw_resource **p_res)
142 {
143  struct vmw_resource *res = *p_res;
144  struct vmw_private *dev_priv = res->dev_priv;
145 
146  *p_res = NULL;
147  write_lock(&dev_priv->resource_lock);
148  kref_put(&res->kref, vmw_resource_release);
149  write_unlock(&dev_priv->resource_lock);
150 }
151 
152 
162 static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
163  struct vmw_resource *res)
164 {
165  int ret;
166 
167  BUG_ON(res->id != -1);
168 
169  do {
170  if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
171  return -ENOMEM;
172 
173  write_lock(&dev_priv->resource_lock);
174  ret = idr_get_new_above(res->idr, res, 1, &res->id);
175  write_unlock(&dev_priv->resource_lock);
176 
177  } while (ret == -EAGAIN);
178 
179  return ret;
180 }
181 
182 
183 static int vmw_resource_init(struct vmw_private *dev_priv,
184  struct vmw_resource *res,
185  struct idr *idr,
186  enum ttm_object_type obj_type,
187  bool delay_id,
188  void (*res_free) (struct vmw_resource *res),
189  void (*remove_from_lists)
190  (struct vmw_resource *res))
191 {
192  kref_init(&res->kref);
193  res->hw_destroy = NULL;
194  res->res_free = res_free;
195  res->remove_from_lists = remove_from_lists;
196  res->res_type = obj_type;
197  res->idr = idr;
198  res->avail = false;
199  res->dev_priv = dev_priv;
200  INIT_LIST_HEAD(&res->query_head);
201  INIT_LIST_HEAD(&res->validate_head);
202  res->id = -1;
203  if (delay_id)
204  return 0;
205  else
206  return vmw_resource_alloc_id(dev_priv, res);
207 }
208 
222 static void vmw_resource_activate(struct vmw_resource *res,
223  void (*hw_destroy) (struct vmw_resource *))
224 {
225  struct vmw_private *dev_priv = res->dev_priv;
226 
227  write_lock(&dev_priv->resource_lock);
228  res->avail = true;
229  res->hw_destroy = hw_destroy;
230  write_unlock(&dev_priv->resource_lock);
231 }
232 
233 static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
234  struct idr *idr, int id)
235 {
236  struct vmw_resource *res;
237 
238  read_lock(&dev_priv->resource_lock);
239  res = idr_find(idr, id);
240  if (res && res->avail)
241  kref_get(&res->kref);
242  else
243  res = NULL;
244  read_unlock(&dev_priv->resource_lock);
245 
246  if (unlikely(res == NULL))
247  return NULL;
248 
249  return res;
250 }
251 
256 static void vmw_hw_context_destroy(struct vmw_resource *res)
257 {
258 
259  struct vmw_private *dev_priv = res->dev_priv;
260  struct {
261  SVGA3dCmdHeader header;
262  SVGA3dCmdDestroyContext body;
263  } *cmd;
264 
265 
266  vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
267 
268  cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
269  if (unlikely(cmd == NULL)) {
270  DRM_ERROR("Failed reserving FIFO space for surface "
271  "destruction.\n");
272  return;
273  }
274 
275  cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
276  cmd->header.size = cpu_to_le32(sizeof(cmd->body));
277  cmd->body.cid = cpu_to_le32(res->id);
278 
279  vmw_fifo_commit(dev_priv, sizeof(*cmd));
280  vmw_3d_resource_dec(dev_priv, false);
281 }
282 
283 static int vmw_context_init(struct vmw_private *dev_priv,
284  struct vmw_resource *res,
285  void (*res_free) (struct vmw_resource *res))
286 {
287  int ret;
288 
289  struct {
290  SVGA3dCmdHeader header;
291  SVGA3dCmdDefineContext body;
292  } *cmd;
293 
294  ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
295  VMW_RES_CONTEXT, false, res_free, NULL);
296 
297  if (unlikely(ret != 0)) {
298  DRM_ERROR("Failed to allocate a resource id.\n");
299  goto out_early;
300  }
301 
302  if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
303  DRM_ERROR("Out of hw context ids.\n");
304  vmw_resource_unreference(&res);
305  return -ENOMEM;
306  }
307 
308  cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
309  if (unlikely(cmd == NULL)) {
310  DRM_ERROR("Fifo reserve failed.\n");
311  vmw_resource_unreference(&res);
312  return -ENOMEM;
313  }
314 
315  cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
316  cmd->header.size = cpu_to_le32(sizeof(cmd->body));
317  cmd->body.cid = cpu_to_le32(res->id);
318 
319  vmw_fifo_commit(dev_priv, sizeof(*cmd));
320  (void) vmw_3d_resource_inc(dev_priv, false);
321  vmw_resource_activate(res, vmw_hw_context_destroy);
322  return 0;
323 
324 out_early:
325  if (res_free == NULL)
326  kfree(res);
327  else
328  res_free(res);
329  return ret;
330 }
331 
332 struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
333 {
334  struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
335  int ret;
336 
337  if (unlikely(res == NULL))
338  return NULL;
339 
340  ret = vmw_context_init(dev_priv, res, NULL);
341  return (ret == 0) ? res : NULL;
342 }
343 
348 static void vmw_user_context_free(struct vmw_resource *res)
349 {
350  struct vmw_user_context *ctx =
351  container_of(res, struct vmw_user_context, res);
352  struct vmw_private *dev_priv = res->dev_priv;
353 
354  kfree(ctx);
355  ttm_mem_global_free(vmw_mem_glob(dev_priv),
356  vmw_user_context_size);
357 }
358 
364 static void vmw_user_context_base_release(struct ttm_base_object **p_base)
365 {
366  struct ttm_base_object *base = *p_base;
367  struct vmw_user_context *ctx =
368  container_of(base, struct vmw_user_context, base);
369  struct vmw_resource *res = &ctx->res;
370 
371  *p_base = NULL;
372  vmw_resource_unreference(&res);
373 }
374 
375 int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
376  struct drm_file *file_priv)
377 {
378  struct vmw_private *dev_priv = vmw_priv(dev);
379  struct vmw_resource *res;
380  struct vmw_user_context *ctx;
381  struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
382  struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
383  int ret = 0;
384 
385  res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
386  if (unlikely(res == NULL))
387  return -EINVAL;
388 
389  if (res->res_free != &vmw_user_context_free) {
390  ret = -EINVAL;
391  goto out;
392  }
393 
394  ctx = container_of(res, struct vmw_user_context, res);
395  if (ctx->base.tfile != tfile && !ctx->base.shareable) {
396  ret = -EPERM;
397  goto out;
398  }
399 
400  ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
401 out:
402  vmw_resource_unreference(&res);
403  return ret;
404 }
405 
406 int vmw_context_define_ioctl(struct drm_device *dev, void *data,
407  struct drm_file *file_priv)
408 {
409  struct vmw_private *dev_priv = vmw_priv(dev);
410  struct vmw_user_context *ctx;
411  struct vmw_resource *res;
412  struct vmw_resource *tmp;
413  struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
414  struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
415  struct vmw_master *vmaster = vmw_master(file_priv->master);
416  int ret;
417 
418 
419  /*
420  * Approximate idr memory usage with 128 bytes. It will be limited
421  * by the maximum number of contexts anyway.
422  */
423 
424  if (unlikely(vmw_user_context_size == 0))
425  vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
426 
427  ret = ttm_read_lock(&vmaster->lock, true);
428  if (unlikely(ret != 0))
429  return ret;
430 
431  ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
432  vmw_user_context_size,
433  false, true);
434  if (unlikely(ret != 0)) {
435  if (ret != -ERESTARTSYS)
436  DRM_ERROR("Out of graphics memory for context"
437  " creation.\n");
438  goto out_unlock;
439  }
440 
441  ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
442  if (unlikely(ctx == NULL)) {
443  ttm_mem_global_free(vmw_mem_glob(dev_priv),
444  vmw_user_context_size);
445  ret = -ENOMEM;
446  goto out_unlock;
447  }
448 
449  res = &ctx->res;
450  ctx->base.shareable = false;
451  ctx->base.tfile = NULL;
452 
453  /*
454  * From here on, the destructor takes over resource freeing.
455  */
456 
457  ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
458  if (unlikely(ret != 0))
459  goto out_unlock;
460 
461  tmp = vmw_resource_reference(&ctx->res);
462  ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
463  &vmw_user_context_base_release, NULL);
464 
465  if (unlikely(ret != 0)) {
466  vmw_resource_unreference(&tmp);
467  goto out_err;
468  }
469 
470  arg->cid = res->id;
471 out_err:
472  vmw_resource_unreference(&res);
473 out_unlock:
474  ttm_read_unlock(&vmaster->lock);
475  return ret;
476 
477 }
478 
479 int vmw_context_check(struct vmw_private *dev_priv,
480  struct ttm_object_file *tfile,
481  int id,
482  struct vmw_resource **p_res)
483 {
484  struct vmw_resource *res;
485  int ret = 0;
486 
487  read_lock(&dev_priv->resource_lock);
488  res = idr_find(&dev_priv->context_idr, id);
489  if (res && res->avail) {
490  struct vmw_user_context *ctx =
491  container_of(res, struct vmw_user_context, res);
492  if (ctx->base.tfile != tfile && !ctx->base.shareable)
493  ret = -EPERM;
494  if (p_res)
495  *p_res = vmw_resource_reference(res);
496  } else
497  ret = -EINVAL;
498  read_unlock(&dev_priv->resource_lock);
499 
500  return ret;
501 }
502 
503 struct vmw_bpp {
504  uint8_t bpp;
505  uint8_t s_bpp;
506 };
507 
508 /*
509  * Size table for the supported SVGA3D surface formats. It consists of
510  * two values. The bpp value and the s_bpp value which is short for
511  * "stride bits per pixel" The values are given in such a way that the
512  * minimum stride for the image is calculated using
513  *
514  * min_stride = w*s_bpp
515  *
516  * and the total memory requirement for the image is
517  *
518  * h*min_stride*bpp/s_bpp
519  *
520  */
521 static const struct vmw_bpp vmw_sf_bpp[] = {
522  [SVGA3D_FORMAT_INVALID] = {0, 0},
523  [SVGA3D_X8R8G8B8] = {32, 32},
524  [SVGA3D_A8R8G8B8] = {32, 32},
525  [SVGA3D_R5G6B5] = {16, 16},
526  [SVGA3D_X1R5G5B5] = {16, 16},
527  [SVGA3D_A1R5G5B5] = {16, 16},
528  [SVGA3D_A4R4G4B4] = {16, 16},
529  [SVGA3D_Z_D32] = {32, 32},
530  [SVGA3D_Z_D16] = {16, 16},
531  [SVGA3D_Z_D24S8] = {32, 32},
532  [SVGA3D_Z_D15S1] = {16, 16},
533  [SVGA3D_LUMINANCE8] = {8, 8},
534  [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
535  [SVGA3D_LUMINANCE16] = {16, 16},
536  [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
537  [SVGA3D_DXT1] = {4, 16},
538  [SVGA3D_DXT2] = {8, 32},
539  [SVGA3D_DXT3] = {8, 32},
540  [SVGA3D_DXT4] = {8, 32},
541  [SVGA3D_DXT5] = {8, 32},
542  [SVGA3D_BUMPU8V8] = {16, 16},
543  [SVGA3D_BUMPL6V5U5] = {16, 16},
544  [SVGA3D_BUMPX8L8V8U8] = {32, 32},
545  [SVGA3D_ARGB_S10E5] = {16, 16},
546  [SVGA3D_ARGB_S23E8] = {32, 32},
547  [SVGA3D_A2R10G10B10] = {32, 32},
548  [SVGA3D_V8U8] = {16, 16},
549  [SVGA3D_Q8W8V8U8] = {32, 32},
550  [SVGA3D_CxV8U8] = {16, 16},
551  [SVGA3D_X8L8V8U8] = {32, 32},
552  [SVGA3D_A2W10V10U10] = {32, 32},
553  [SVGA3D_ALPHA8] = {8, 8},
554  [SVGA3D_R_S10E5] = {16, 16},
555  [SVGA3D_R_S23E8] = {32, 32},
556  [SVGA3D_RG_S10E5] = {16, 16},
557  [SVGA3D_RG_S23E8] = {32, 32},
558  [SVGA3D_BUFFER] = {8, 8},
559  [SVGA3D_Z_D24X8] = {32, 32},
560  [SVGA3D_V16U16] = {32, 32},
561  [SVGA3D_G16R16] = {32, 32},
562  [SVGA3D_A16B16G16R16] = {64, 64},
563  [SVGA3D_UYVY] = {12, 12},
564  [SVGA3D_YUY2] = {12, 12},
565  [SVGA3D_NV12] = {12, 8},
566  [SVGA3D_AYUV] = {32, 32},
567  [SVGA3D_BC4_UNORM] = {4, 16},
568  [SVGA3D_BC5_UNORM] = {8, 32},
569  [SVGA3D_Z_DF16] = {16, 16},
570  [SVGA3D_Z_DF24] = {24, 24},
571  [SVGA3D_Z_D24S8_INT] = {32, 32}
572 };
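/*
 * Illustrative sketch (not part of the original file): how a bpp/s_bpp pair
 * from the table above turns into a scanline stride and a total image size,
 * using the two formulas from the preceding comment. The helper name
 * vmw_example_image_size() is hypothetical. For SVGA3D_DXT1 (bpp = 4,
 * s_bpp = 16), a 256x256x1 image gives a stride of (256 * 16 + 7) / 8 = 512
 * bytes and a total of 512 * 256 * 1 * 4 / 16 = 32768 bytes, matching the
 * pitch and maximumOffset computed in vmw_surface_dma_encode() below.
 */
static inline uint32_t vmw_example_image_size(uint32_t w, uint32_t h,
					      uint32_t d,
					      const struct vmw_bpp *fmt)
{
	/* Minimum stride: w * s_bpp bits, rounded up to whole bytes. */
	uint32_t stride = (w * fmt->s_bpp + 7) >> 3;

	/* Total bytes: h * d scanlines of that stride, scaled by bpp/s_bpp. */
	return stride * h * d * fmt->bpp / fmt->s_bpp;
}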
573 
574 
579 struct vmw_surface_dma {
580  SVGA3dCmdHeader header;
581  SVGA3dCmdSurfaceDMA body;
582  SVGA3dCopyBox cb;
583  SVGA3dCmdSurfaceDMASuffix suffix;
584 };
585 
586 struct vmw_surface_define {
587  SVGA3dCmdHeader header;
588  SVGA3dCmdDefineSurface body;
589 };
590 
591 struct vmw_surface_destroy {
592  SVGA3dCmdHeader header;
593  SVGA3dCmdDestroySurface body;
594 };
595 
596 
605 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
606 {
607  return srf->num_sizes * sizeof(struct vmw_surface_dma);
608 }
609 
610 
619 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
620 {
621  return sizeof(struct vmw_surface_define) + srf->num_sizes *
622  sizeof(SVGA3dSize);
623 }
624 
625 
632 static inline uint32_t vmw_surface_destroy_size(void)
633 {
634  return sizeof(struct vmw_surface_destroy);
635 }
636 
643 static void vmw_surface_destroy_encode(uint32_t id,
644  void *cmd_space)
645 {
646  struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
647  cmd_space;
648 
649  cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
650  cmd->header.size = sizeof(cmd->body);
651  cmd->body.sid = id;
652 }
653 
660 static void vmw_surface_define_encode(const struct vmw_surface *srf,
661  void *cmd_space)
662 {
663  struct vmw_surface_define *cmd = (struct vmw_surface_define *)
664  cmd_space;
665  struct drm_vmw_size *src_size;
668  int i;
669 
670  cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
671 
672  cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
673  cmd->header.size = cmd_len;
674  cmd->body.sid = srf->res.id;
675  cmd->body.surfaceFlags = srf->flags;
676  cmd->body.format = cpu_to_le32(srf->format);
677  for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
678  cmd->body.face[i].numMipLevels = srf->mip_levels[i];
679 
680  cmd += 1;
681  cmd_size = (SVGA3dSize *) cmd;
682  src_size = srf->sizes;
683 
684  for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
685  cmd_size->width = src_size->width;
686  cmd_size->height = src_size->height;
687  cmd_size->depth = src_size->depth;
688  }
689 }
690 
691 
701 static void vmw_surface_dma_encode(struct vmw_surface *srf,
702  void *cmd_space,
703  const SVGAGuestPtr *ptr,
704  bool to_surface)
705 {
706  uint32_t i;
707  uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
708  uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
709  struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
710 
711  for (i = 0; i < srf->num_sizes; ++i) {
712  SVGA3dCmdHeader *header = &cmd->header;
713  SVGA3dCmdSurfaceDMA *body = &cmd->body;
714  SVGA3dCopyBox *cb = &cmd->cb;
715  SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
716  const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
717  const struct drm_vmw_size *cur_size = &srf->sizes[i];
718 
719  header->id = SVGA_3D_CMD_SURFACE_DMA;
720  header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
721 
722  body->guest.ptr = *ptr;
723  body->guest.ptr.offset += cur_offset->bo_offset;
724  body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
725  body->host.sid = srf->res.id;
726  body->host.face = cur_offset->face;
727  body->host.mipmap = cur_offset->mip;
728  body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
729  SVGA3D_READ_HOST_VRAM);
730  cb->x = 0;
731  cb->y = 0;
732  cb->z = 0;
733  cb->srcx = 0;
734  cb->srcy = 0;
735  cb->srcz = 0;
736  cb->w = cur_size->width;
737  cb->h = cur_size->height;
738  cb->d = cur_size->depth;
739 
740  suffix->suffixSize = sizeof(*suffix);
741  suffix->maximumOffset = body->guest.pitch*cur_size->height*
742  cur_size->depth*bpp / stride_bpp;
743  suffix->flags.discard = 0;
744  suffix->flags.unsynchronized = 0;
745  suffix->flags.reserved = 0;
746  ++cmd;
747  }
748 };
749 
750 
751 static void vmw_hw_surface_destroy(struct vmw_resource *res)
752 {
753 
754  struct vmw_private *dev_priv = res->dev_priv;
755  struct vmw_surface *srf;
756  void *cmd;
757 
758  if (res->id != -1) {
759 
760  cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
761  if (unlikely(cmd == NULL)) {
762  DRM_ERROR("Failed reserving FIFO space for surface "
763  "destruction.\n");
764  return;
765  }
766 
767  vmw_surface_destroy_encode(res->id, cmd);
768  vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
769 
770  /*
771  * used_memory_size_atomic, or separate lock
772  * to avoid taking dev_priv::cmdbuf_mutex in
773  * the destroy path.
774  */
775 
776  mutex_lock(&dev_priv->cmdbuf_mutex);
777  srf = container_of(res, struct vmw_surface, res);
778  dev_priv->used_memory_size -= srf->backup_size;
779  mutex_unlock(&dev_priv->cmdbuf_mutex);
780 
781  }
782  vmw_3d_resource_dec(dev_priv, false);
783 }
784 
785 void vmw_surface_res_free(struct vmw_resource *res)
786 {
787  struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
788 
789  if (srf->backup)
790  ttm_bo_unref(&srf->backup);
791  kfree(srf->offsets);
792  kfree(srf->sizes);
793  kfree(srf->snooper.image);
794  kfree(srf);
795 }
796 
797 
812 static int vmw_surface_do_validate(struct vmw_private *dev_priv,
813  struct vmw_surface *srf)
814 {
815  struct vmw_resource *res = &srf->res;
816  struct list_head val_list;
817  struct ttm_validate_buffer val_buf;
818  uint32_t submit_size;
819  uint8_t *cmd;
820  int ret;
821 
822  if (likely(res->id != -1))
823  return 0;
824 
825  if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
826  dev_priv->memory_size))
827  return -EBUSY;
828 
829  /*
830  * Reserve- and validate the backup DMA bo.
831  */
832 
833  if (srf->backup) {
834  INIT_LIST_HEAD(&val_list);
835  val_buf.bo = ttm_bo_reference(srf->backup);
836  val_buf.new_sync_obj_arg = (void *)((unsigned long)
837  DRM_VMW_FENCE_FLAG_EXEC);
838  list_add_tail(&val_buf.head, &val_list);
839  ret = ttm_eu_reserve_buffers(&val_list);
840  if (unlikely(ret != 0))
841  goto out_no_reserve;
842 
843  ret = ttm_bo_validate(val_buf.bo, &vmw_srf_placement,
844  true, false, false);
845  if (unlikely(ret != 0))
846  goto out_no_validate;
847  }
848 
849  /*
850  * Alloc id for the resource.
851  */
852 
853  ret = vmw_resource_alloc_id(dev_priv, res);
854  if (unlikely(ret != 0)) {
855  DRM_ERROR("Failed to allocate a surface id.\n");
856  goto out_no_id;
857  }
858  if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
859  ret = -EBUSY;
860  goto out_no_fifo;
861  }
862 
863 
864  /*
865  * Encode surface define- and dma commands.
866  */
867 
868  submit_size = vmw_surface_define_size(srf);
869  if (srf->backup)
870  submit_size += vmw_surface_dma_size(srf);
871 
872  cmd = vmw_fifo_reserve(dev_priv, submit_size);
873  if (unlikely(cmd == NULL)) {
874  DRM_ERROR("Failed reserving FIFO space for surface "
875  "validation.\n");
876  ret = -ENOMEM;
877  goto out_no_fifo;
878  }
879 
880  vmw_surface_define_encode(srf, cmd);
881  if (srf->backup) {
882  SVGAGuestPtr ptr;
883 
884  cmd += vmw_surface_define_size(srf);
885  vmw_bo_get_guest_ptr(srf->backup, &ptr);
886  vmw_surface_dma_encode(srf, cmd, &ptr, true);
887  }
888 
889  vmw_fifo_commit(dev_priv, submit_size);
890 
891  /*
892  * Create a fence object and fence the backup buffer.
893  */
894 
895  if (srf->backup) {
896  struct vmw_fence_obj *fence;
897 
898  (void) vmw_execbuf_fence_commands(NULL, dev_priv,
899  &fence, NULL);
900  ttm_eu_fence_buffer_objects(&val_list, fence);
901  if (likely(fence != NULL))
902  vmw_fence_obj_unreference(&fence);
903  ttm_bo_unref(&val_buf.bo);
904  ttm_bo_unref(&srf->backup);
905  }
906 
907  /*
908  * Surface memory usage accounting.
909  */
910 
911  dev_priv->used_memory_size += srf->backup_size;
912 
913  return 0;
914 
915 out_no_fifo:
916  vmw_resource_release_id(res);
917 out_no_id:
918 out_no_validate:
919  if (srf->backup)
920  ttm_eu_backoff_reservation(&val_list);
921 out_no_reserve:
922  if (srf->backup)
923  ttm_bo_unref(&val_buf.bo);
924  return ret;
925 }
926 
936 int vmw_surface_evict(struct vmw_private *dev_priv,
937  struct vmw_surface *srf)
938 {
939  struct vmw_resource *res = &srf->res;
940  struct list_head val_list;
941  struct ttm_validate_buffer val_buf;
942  uint32_t submit_size;
943  uint8_t *cmd;
944  int ret;
945  struct vmw_fence_obj *fence;
946  SVGAGuestPtr ptr;
947 
948  BUG_ON(res->id == -1);
949 
950  /*
951  * Create a surface backup buffer object.
952  */
953 
954  if (!srf->backup) {
955  ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
956  ttm_bo_type_device,
957  &vmw_srf_placement, 0, 0, true,
958  NULL, &srf->backup);
959  if (unlikely(ret != 0))
960  return ret;
961  }
962 
963  /*
964  * Reserve- and validate the backup DMA bo.
965  */
966 
967  INIT_LIST_HEAD(&val_list);
968  val_buf.bo = ttm_bo_reference(srf->backup);
969  val_buf.new_sync_obj_arg = (void *)(unsigned long)
970  DRM_VMW_FENCE_FLAG_EXEC;
971  list_add_tail(&val_buf.head, &val_list);
972  ret = ttm_eu_reserve_buffers(&val_list);
973  if (unlikely(ret != 0))
974  goto out_no_reserve;
975 
976  ret = ttm_bo_validate(val_buf.bo, &vmw_srf_placement,
977  true, false, false);
978  if (unlikely(ret != 0))
979  goto out_no_validate;
980 
981 
982  /*
983  * Encode the dma- and surface destroy commands.
984  */
985 
986  submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
987  cmd = vmw_fifo_reserve(dev_priv, submit_size);
988  if (unlikely(cmd == NULL)) {
989  DRM_ERROR("Failed reserving FIFO space for surface "
990  "eviction.\n");
991  ret = -ENOMEM;
992  goto out_no_fifo;
993  }
994 
995  vmw_bo_get_guest_ptr(srf->backup, &ptr);
996  vmw_surface_dma_encode(srf, cmd, &ptr, false);
997  cmd += vmw_surface_dma_size(srf);
998  vmw_surface_destroy_encode(res->id, cmd);
999  vmw_fifo_commit(dev_priv, submit_size);
1000 
1001  /*
1002  * Surface memory usage accounting.
1003  */
1004 
1005  dev_priv->used_memory_size -= srf->backup_size;
1006 
1007  /*
1008  * Create a fence object and fence the DMA buffer.
1009  */
1010 
1011  (void) vmw_execbuf_fence_commands(NULL, dev_priv,
1012  &fence, NULL);
1013  ttm_eu_fence_buffer_objects(&val_list, fence);
1014  if (likely(fence != NULL))
1015  vmw_fence_obj_unreference(&fence);
1016  ttm_bo_unref(&val_buf.bo);
1017 
1018  /*
1019  * Release the surface ID.
1020  */
1021 
1022  vmw_resource_release_id(res);
1023 
1024  return 0;
1025 
1026 out_no_fifo:
1027 out_no_validate:
1028  if (srf->backup)
1029  ttm_eu_backoff_reservation(&val_list);
1030 out_no_reserve:
1031  ttm_bo_unref(&val_buf.bo);
1032  ttm_bo_unref(&srf->backup);
1033  return ret;
1034 }
1035 
1036 
1050 int vmw_surface_validate(struct vmw_private *dev_priv,
1051  struct vmw_surface *srf)
1052 {
1053  int ret;
1054  struct vmw_surface *evict_srf;
1055 
1056  do {
1057  write_lock(&dev_priv->resource_lock);
1058  list_del_init(&srf->lru_head);
1059  write_unlock(&dev_priv->resource_lock);
1060 
1061  ret = vmw_surface_do_validate(dev_priv, srf);
1062  if (likely(ret != -EBUSY))
1063  break;
1064 
1065  write_lock(&dev_priv->resource_lock);
1066  if (list_empty(&dev_priv->surface_lru)) {
1067  DRM_ERROR("Out of device memory for surfaces.\n");
1068  ret = -EBUSY;
1069  write_unlock(&dev_priv->resource_lock);
1070  break;
1071  }
1072 
1073  evict_srf = vmw_surface_reference
1074  (list_first_entry(&dev_priv->surface_lru,
1075  struct vmw_surface,
1076  lru_head));
1077  list_del_init(&evict_srf->lru_head);
1078 
1079  write_unlock(&dev_priv->resource_lock);
1080  (void) vmw_surface_evict(dev_priv, evict_srf);
1081 
1082  vmw_surface_unreference(&evict_srf);
1083 
1084  } while (1);
1085 
1086  if (unlikely(ret != 0 && srf->res.id != -1)) {
1087  write_lock(&dev_priv->resource_lock);
1088  list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
1089  write_unlock(&dev_priv->resource_lock);
1090  }
1091 
1092  return ret;
1093 }
1094 
1095 
1104 static void vmw_surface_remove_from_lists(struct vmw_resource *res)
1105 {
1106  struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1107 
1108  list_del_init(&srf->lru_head);
1109 }
1110 
1111 int vmw_surface_init(struct vmw_private *dev_priv,
1112  struct vmw_surface *srf,
1113  void (*res_free) (struct vmw_resource *res))
1114 {
1115  int ret;
1116  struct vmw_resource *res = &srf->res;
1117 
1118  BUG_ON(res_free == NULL);
1119  INIT_LIST_HEAD(&srf->lru_head);
1120  ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
1121  VMW_RES_SURFACE, true, res_free,
1122  vmw_surface_remove_from_lists);
1123 
1124  if (unlikely(ret != 0))
1125  res_free(res);
1126 
1127  /*
1128  * The surface won't be visible to hardware until a
1129  * surface validate.
1130  */
1131 
1132  (void) vmw_3d_resource_inc(dev_priv, false);
1133  vmw_resource_activate(res, vmw_hw_surface_destroy);
1134  return ret;
1135 }
1136 
1137 static void vmw_user_surface_free(struct vmw_resource *res)
1138 {
1139  struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1140  struct vmw_user_surface *user_srf =
1141  container_of(srf, struct vmw_user_surface, srf);
1142  struct vmw_private *dev_priv = srf->res.dev_priv;
1143  uint32_t size = user_srf->size;
1144 
1145  if (srf->backup)
1146  ttm_bo_unref(&srf->backup);
1147  kfree(srf->offsets);
1148  kfree(srf->sizes);
1149  kfree(srf->snooper.image);
1150  kfree(user_srf);
1151  ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1152 }
1153 
1167 void vmw_resource_unreserve(struct list_head *list)
1168 {
1169  struct vmw_resource *res;
1170  struct vmw_surface *srf;
1171  rwlock_t *lock = NULL;
1172 
1173  list_for_each_entry(res, list, validate_head) {
1174 
1175  if (res->res_free != &vmw_surface_res_free &&
1176  res->res_free != &vmw_user_surface_free)
1177  continue;
1178 
1179  if (unlikely(lock == NULL)) {
1180  lock = &res->dev_priv->resource_lock;
1181  write_lock(lock);
1182  }
1183 
1184  srf = container_of(res, struct vmw_surface, res);
1185  list_del_init(&srf->lru_head);
1186  list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
1187  }
1188 
1189  if (lock != NULL)
1190  write_unlock(lock);
1191 }
1192 
1198 int vmw_user_lookup_handle(struct vmw_private *dev_priv,
1199  struct ttm_object_file *tfile,
1200  uint32_t handle,
1201  struct vmw_surface **out_surf,
1202  struct vmw_dma_buffer **out_buf)
1203 {
1204  int ret;
1205 
1206  BUG_ON(*out_surf || *out_buf);
1207 
1208  ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
1209  if (!ret)
1210  return 0;
1211 
1212  ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
1213  return ret;
1214 }
1215 
1216 
1217 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
1218  struct ttm_object_file *tfile,
1219  uint32_t handle, struct vmw_surface **out)
1220 {
1221  struct vmw_resource *res;
1222  struct vmw_surface *srf;
1223  struct vmw_user_surface *user_srf;
1224  struct ttm_base_object *base;
1225  int ret = -EINVAL;
1226 
1227  base = ttm_base_object_lookup(tfile, handle);
1228  if (unlikely(base == NULL))
1229  return -EINVAL;
1230 
1231  if (unlikely(base->object_type != VMW_RES_SURFACE))
1232  goto out_bad_resource;
1233 
1234  user_srf = container_of(base, struct vmw_user_surface, base);
1235  srf = &user_srf->srf;
1236  res = &srf->res;
1237 
1238  read_lock(&dev_priv->resource_lock);
1239 
1240  if (!res->avail || res->res_free != &vmw_user_surface_free) {
1241  read_unlock(&dev_priv->resource_lock);
1242  goto out_bad_resource;
1243  }
1244 
1245  kref_get(&res->kref);
1246  read_unlock(&dev_priv->resource_lock);
1247 
1248  *out = srf;
1249  ret = 0;
1250 
1251 out_bad_resource:
1252  ttm_base_object_unref(&base);
1253 
1254  return ret;
1255 }
1256 
1257 static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
1258 {
1259  struct ttm_base_object *base = *p_base;
1260  struct vmw_user_surface *user_srf =
1261  container_of(base, struct vmw_user_surface, base);
1262  struct vmw_resource *res = &user_srf->srf.res;
1263 
1264  *p_base = NULL;
1265  vmw_resource_unreference(&res);
1266 }
1267 
1268 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1269  struct drm_file *file_priv)
1270 {
1271  struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
1272  struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1273 
1274  return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
1275 }
1276 
1277 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1278  struct drm_file *file_priv)
1279 {
1280  struct vmw_private *dev_priv = vmw_priv(dev);
1281  struct vmw_user_surface *user_srf;
1282  struct vmw_surface *srf;
1283  struct vmw_resource *res;
1284  struct vmw_resource *tmp;
1285  union drm_vmw_surface_create_arg *arg =
1286  (union drm_vmw_surface_create_arg *)data;
1287  struct drm_vmw_surface_create_req *req = &arg->req;
1288  struct drm_vmw_surface_arg *rep = &arg->rep;
1289  struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1290  struct drm_vmw_size __user *user_sizes;
1291  int ret;
1292  int i, j;
1293  uint32_t cur_bo_offset;
1294  struct drm_vmw_size *cur_size;
1295  struct vmw_surface_offset *cur_offset;
1296  uint32_t stride_bpp;
1297  uint32_t bpp;
1298  uint32_t num_sizes;
1299  uint32_t size;
1300  struct vmw_master *vmaster = vmw_master(file_priv->master);
1301 
1302  if (unlikely(vmw_user_surface_size == 0))
1303  vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1304  128;
1305 
1306  num_sizes = 0;
1307  for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
1308  num_sizes += req->mip_levels[i];
1309 
1310  if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
1311  DRM_VMW_MAX_MIP_LEVELS)
1312  return -EINVAL;
1313 
1314  size = vmw_user_surface_size + 128 +
1315  ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
1316  ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
1317 
1318 
1319  ret = ttm_read_lock(&vmaster->lock, true);
1320  if (unlikely(ret != 0))
1321  return ret;
1322 
1323  ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1324  size, false, true);
1325  if (unlikely(ret != 0)) {
1326  if (ret != -ERESTARTSYS)
1327  DRM_ERROR("Out of graphics memory for surface"
1328  " creation.\n");
1329  goto out_unlock;
1330  }
1331 
1332  user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
1333  if (unlikely(user_srf == NULL)) {
1334  ret = -ENOMEM;
1335  goto out_no_user_srf;
1336  }
1337 
1338  srf = &user_srf->srf;
1339  res = &srf->res;
1340 
1341  srf->flags = req->flags;
1342  srf->format = req->format;
1343  srf->scanout = req->scanout;
1344  srf->backup = NULL;
1345 
1346  memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
1347  srf->num_sizes = num_sizes;
1348  user_srf->size = size;
1349 
1350  srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
1351  if (unlikely(srf->sizes == NULL)) {
1352  ret = -ENOMEM;
1353  goto out_no_sizes;
1354  }
1355  srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
1356  GFP_KERNEL);
1357  if (unlikely(srf->sizes == NULL)) {
1358  ret = -ENOMEM;
1359  goto out_no_offsets;
1360  }
1361 
1362  user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1363  req->size_addr;
1364 
1365  ret = copy_from_user(srf->sizes, user_sizes,
1366  srf->num_sizes * sizeof(*srf->sizes));
1367  if (unlikely(ret != 0)) {
1368  ret = -EFAULT;
1369  goto out_no_copy;
1370  }
1371 
1372  cur_bo_offset = 0;
1373  cur_offset = srf->offsets;
1374  cur_size = srf->sizes;
1375 
1376  bpp = vmw_sf_bpp[srf->format].bpp;
1377  stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
1378 
1379  for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
1380  for (j = 0; j < srf->mip_levels[i]; ++j) {
1381  uint32_t stride =
1382  (cur_size->width * stride_bpp + 7) >> 3;
1383 
1384  cur_offset->face = i;
1385  cur_offset->mip = j;
1386  cur_offset->bo_offset = cur_bo_offset;
1387  cur_bo_offset += stride * cur_size->height *
1388  cur_size->depth * bpp / stride_bpp;
1389  ++cur_offset;
1390  ++cur_size;
1391  }
1392  }
1393  srf->backup_size = cur_bo_offset;
1394 
1395  if (srf->scanout &&
1396  srf->num_sizes == 1 &&
1397  srf->sizes[0].width == 64 &&
1398  srf->sizes[0].height == 64 &&
1399  srf->format == SVGA3D_A8R8G8B8) {
1400 
1401  /* allocate image area and clear it */
1402  srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
1403  if (!srf->snooper.image) {
1404  DRM_ERROR("Failed to allocate cursor_image\n");
1405  ret = -ENOMEM;
1406  goto out_no_copy;
1407  }
1408  } else {
1409  srf->snooper.image = NULL;
1410  }
1411  srf->snooper.crtc = NULL;
1412 
1413  user_srf->base.shareable = false;
1414  user_srf->base.tfile = NULL;
1415 
1421  ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1422  if (unlikely(ret != 0))
1423  goto out_unlock;
1424 
1425  tmp = vmw_resource_reference(&srf->res);
1426  ret = ttm_base_object_init(tfile, &user_srf->base,
1427  req->shareable, VMW_RES_SURFACE,
1428  &vmw_user_surface_base_release, NULL);
1429 
1430  if (unlikely(ret != 0)) {
1431  vmw_resource_unreference(&tmp);
1432  vmw_resource_unreference(&res);
1433  goto out_unlock;
1434  }
1435 
1436  rep->sid = user_srf->base.hash.key;
1437  if (rep->sid == SVGA3D_INVALID_ID)
1438  DRM_ERROR("Created bad Surface ID.\n");
1439 
1439 
1440  vmw_resource_unreference(&res);
1441 
1442  ttm_read_unlock(&vmaster->lock);
1443  return 0;
1444 out_no_copy:
1445  kfree(srf->offsets);
1446 out_no_offsets:
1447  kfree(srf->sizes);
1448 out_no_sizes:
1449  kfree(user_srf);
1450 out_no_user_srf:
1451  ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1452 out_unlock:
1453  ttm_read_unlock(&vmaster->lock);
1454  return ret;
1455 }
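/*
 * Illustrative sketch (not part of the original file): the backup-buffer
 * layout computed by the face/mip loop in vmw_surface_define_ioctl() above.
 * Each mip level starts where the previous one ended. For a three-level
 * 256x256 SVGA3D_X8R8G8B8 chain (bpp = s_bpp = 32) the offsets come out as
 * 0, 262144 and 327680, and the resulting backup_size is 344064 bytes. The
 * helper name vmw_example_mip_offsets() is hypothetical.
 */
static inline void vmw_example_mip_offsets(const struct drm_vmw_size *sizes,
					   uint32_t num_sizes,
					   uint32_t bpp, uint32_t s_bpp,
					   uint32_t *offsets,
					   uint32_t *backup_size)
{
	uint32_t cur_bo_offset = 0;
	uint32_t i;

	for (i = 0; i < num_sizes; ++i) {
		/* Byte stride of one scanline, rounded up to whole bytes. */
		uint32_t stride = (sizes[i].width * s_bpp + 7) >> 3;

		offsets[i] = cur_bo_offset;
		cur_bo_offset += stride * sizes[i].height *
				 sizes[i].depth * bpp / s_bpp;
	}
	*backup_size = cur_bo_offset;
}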
1456 
1457 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1458  struct drm_file *file_priv)
1459 {
1460  union drm_vmw_surface_reference_arg *arg =
1461  (union drm_vmw_surface_reference_arg *)data;
1462  struct drm_vmw_surface_arg *req = &arg->req;
1463  struct drm_vmw_surface_create_req *rep = &arg->rep;
1464  struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1465  struct vmw_surface *srf;
1466  struct vmw_user_surface *user_srf;
1467  struct drm_vmw_size __user *user_sizes;
1468  struct ttm_base_object *base;
1469  int ret = -EINVAL;
1470 
1471  base = ttm_base_object_lookup(tfile, req->sid);
1472  if (unlikely(base == NULL)) {
1473  DRM_ERROR("Could not find surface to reference.\n");
1474  return -EINVAL;
1475  }
1476 
1477  if (unlikely(base->object_type != VMW_RES_SURFACE))
1478  goto out_bad_resource;
1479 
1480  user_srf = container_of(base, struct vmw_user_surface, base);
1481  srf = &user_srf->srf;
1482 
1483  ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
1484  if (unlikely(ret != 0)) {
1485  DRM_ERROR("Could not add a reference to a surface.\n");
1486  goto out_no_reference;
1487  }
1488 
1489  rep->flags = srf->flags;
1490  rep->format = srf->format;
1491  memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
1492  user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1493  rep->size_addr;
1494 
1495  if (user_sizes)
1496  ret = copy_to_user(user_sizes, srf->sizes,
1497  srf->num_sizes * sizeof(*srf->sizes));
1498  if (unlikely(ret != 0)) {
1499  DRM_ERROR("copy_to_user failed %p %u\n",
1500  user_sizes, srf->num_sizes);
1501  ret = -EFAULT;
1502  }
1503 out_bad_resource:
1504 out_no_reference:
1505  ttm_base_object_unref(&base);
1506 
1507  return ret;
1508 }
1509 
1510 int vmw_surface_check(struct vmw_private *dev_priv,
1511  struct ttm_object_file *tfile,
1512  uint32_t handle, int *id)
1513 {
1514  struct ttm_base_object *base;
1515  struct vmw_user_surface *user_srf;
1516 
1517  int ret = -EPERM;
1518 
1519  base = ttm_base_object_lookup(tfile, handle);
1520  if (unlikely(base == NULL))
1521  return -EINVAL;
1522 
1523  if (unlikely(base->object_type != VMW_RES_SURFACE))
1524  goto out_bad_surface;
1525 
1526  user_srf = container_of(base, struct vmw_user_surface, base);
1527  *id = user_srf->srf.res.id;
1528  ret = 0;
1529 
1530 out_bad_surface:
1536  ttm_base_object_unref(&base);
1537  return ret;
1538 }
1539 
1543 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
1544 {
1545  struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1546 
1547  kfree(vmw_bo);
1548 }
1549 
1550 int vmw_dmabuf_init(struct vmw_private *dev_priv,
1551  struct vmw_dma_buffer *vmw_bo,
1552  size_t size, struct ttm_placement *placement,
1553  bool interruptible,
1554  void (*bo_free) (struct ttm_buffer_object *bo))
1555 {
1556  struct ttm_bo_device *bdev = &dev_priv->bdev;
1557  size_t acc_size;
1558  int ret;
1559 
1560  BUG_ON(!bo_free);
1561 
1562  acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
1563  memset(vmw_bo, 0, sizeof(*vmw_bo));
1564 
1565  INIT_LIST_HEAD(&vmw_bo->validate_list);
1566 
1567  ret = ttm_bo_init(bdev, &vmw_bo->base, size,
1568  ttm_bo_type_device, placement,
1569  0, 0, interruptible,
1570  NULL, acc_size, NULL, bo_free);
1571  return ret;
1572 }
1573 
1574 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
1575 {
1576  struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
1577 
1578  kfree(vmw_user_bo);
1579 }
1580 
1581 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
1582 {
1583  struct vmw_user_dma_buffer *vmw_user_bo;
1584  struct ttm_base_object *base = *p_base;
1585  struct ttm_buffer_object *bo;
1586 
1587  *p_base = NULL;
1588 
1589  if (unlikely(base == NULL))
1590  return;
1591 
1592  vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
1593  bo = &vmw_user_bo->dma.base;
1594  ttm_bo_unref(&bo);
1595 }
1596 
1597 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
1598  struct drm_file *file_priv)
1599 {
1600  struct vmw_private *dev_priv = vmw_priv(dev);
1601  union drm_vmw_alloc_dmabuf_arg *arg =
1602  (union drm_vmw_alloc_dmabuf_arg *)data;
1603  struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
1604  struct drm_vmw_dmabuf_rep *rep = &arg->rep;
1605  struct vmw_user_dma_buffer *vmw_user_bo;
1606  struct ttm_buffer_object *tmp;
1607  struct vmw_master *vmaster = vmw_master(file_priv->master);
1608  int ret;
1609 
1610  vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
1611  if (unlikely(vmw_user_bo == NULL))
1612  return -ENOMEM;
1613 
1614  ret = ttm_read_lock(&vmaster->lock, true);
1615  if (unlikely(ret != 0)) {
1616  kfree(vmw_user_bo);
1617  return ret;
1618  }
1619 
1620  ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
1621  &vmw_vram_sys_placement, true,
1622  &vmw_user_dmabuf_destroy);
1623  if (unlikely(ret != 0))
1624  goto out_no_dmabuf;
1625 
1626  tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
1627  ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
1628  &vmw_user_bo->base,
1629  false,
1630  ttm_buffer_type,
1631  &vmw_user_dmabuf_release, NULL);
1632  if (unlikely(ret != 0))
1633  goto out_no_base_object;
1634  else {
1635  rep->handle = vmw_user_bo->base.hash.key;
1636  rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
1637  rep->cur_gmr_id = vmw_user_bo->base.hash.key;
1638  rep->cur_gmr_offset = 0;
1639  }
1640 
1641 out_no_base_object:
1642  ttm_bo_unref(&tmp);
1643 out_no_dmabuf:
1644  ttm_read_unlock(&vmaster->lock);
1645 
1646  return ret;
1647 }
1648 
1649 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
1650  struct drm_file *file_priv)
1651 {
1652  struct drm_vmw_unref_dmabuf_arg *arg =
1653  (struct drm_vmw_unref_dmabuf_arg *)data;
1654 
1655  return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1656  arg->handle,
1657  TTM_REF_USAGE);
1658 }
1659 
1660 uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
1661  uint32_t cur_validate_node)
1662 {
1663  struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1664 
1665  if (likely(vmw_bo->on_validate_list))
1666  return vmw_bo->cur_validate_node;
1667 
1668  vmw_bo->cur_validate_node = cur_validate_node;
1669  vmw_bo->on_validate_list = true;
1670 
1671  return cur_validate_node;
1672 }
1673 
1674 void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
1675 {
1676  struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1677 
1678  vmw_bo->on_validate_list = false;
1679 }
1680 
1681 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
1682  uint32_t handle, struct vmw_dma_buffer **out)
1683 {
1684  struct vmw_user_dma_buffer *vmw_user_bo;
1685  struct ttm_base_object *base;
1686 
1687  base = ttm_base_object_lookup(tfile, handle);
1688  if (unlikely(base == NULL)) {
1689  printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
1690  (unsigned long)handle);
1691  return -ESRCH;
1692  }
1693 
1694  if (unlikely(base->object_type != ttm_buffer_type)) {
1695  ttm_base_object_unref(&base);
1696  printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
1697  (unsigned long)handle);
1698  return -EINVAL;
1699  }
1700 
1701  vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
1702  (void)ttm_bo_reference(&vmw_user_bo->dma.base);
1703  ttm_base_object_unref(&base);
1704  *out = &vmw_user_bo->dma;
1705 
1706  return 0;
1707 }
1708 
1709 /*
1710  * Stream management
1711  */
1712 
1713 static void vmw_stream_destroy(struct vmw_resource *res)
1714 {
1715  struct vmw_private *dev_priv = res->dev_priv;
1716  struct vmw_stream *stream;
1717  int ret;
1718 
1719  DRM_INFO("%s: unref\n", __func__);
1720  stream = container_of(res, struct vmw_stream, res);
1721 
1722  ret = vmw_overlay_unref(dev_priv, stream->stream_id);
1723  WARN_ON(ret != 0);
1724 }
1725 
1726 static int vmw_stream_init(struct vmw_private *dev_priv,
1727  struct vmw_stream *stream,
1728  void (*res_free) (struct vmw_resource *res))
1729 {
1730  struct vmw_resource *res = &stream->res;
1731  int ret;
1732 
1733  ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
1734  VMW_RES_STREAM, false, res_free, NULL);
1735 
1736  if (unlikely(ret != 0)) {
1737  if (res_free == NULL)
1738  kfree(stream);
1739  else
1740  res_free(&stream->res);
1741  return ret;
1742  }
1743 
1744  ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
1745  if (ret) {
1746  vmw_resource_unreference(&res);
1747  return ret;
1748  }
1749 
1750  DRM_INFO("%s: claimed\n", __func__);
1751 
1752  vmw_resource_activate(&stream->res, vmw_stream_destroy);
1753  return 0;
1754 }
1755 
1760 static void vmw_user_stream_free(struct vmw_resource *res)
1761 {
1762  struct vmw_user_stream *stream =
1763  container_of(res, struct vmw_user_stream, stream.res);
1764  struct vmw_private *dev_priv = res->dev_priv;
1765 
1766  kfree(stream);
1767  ttm_mem_global_free(vmw_mem_glob(dev_priv),
1768  vmw_user_stream_size);
1769 }
1770 
1776 static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1777 {
1778  struct ttm_base_object *base = *p_base;
1779  struct vmw_user_stream *stream =
1780  container_of(base, struct vmw_user_stream, base);
1781  struct vmw_resource *res = &stream->stream.res;
1782 
1783  *p_base = NULL;
1784  vmw_resource_unreference(&res);
1785 }
1786 
1787 int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1788  struct drm_file *file_priv)
1789 {
1790  struct vmw_private *dev_priv = vmw_priv(dev);
1791  struct vmw_resource *res;
1792  struct vmw_user_stream *stream;
1793  struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1794  struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1795  int ret = 0;
1796 
1797  res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
1798  if (unlikely(res == NULL))
1799  return -EINVAL;
1800 
1801  if (res->res_free != &vmw_user_stream_free) {
1802  ret = -EINVAL;
1803  goto out;
1804  }
1805 
1806  stream = container_of(res, struct vmw_user_stream, stream.res);
1807  if (stream->base.tfile != tfile) {
1808  ret = -EINVAL;
1809  goto out;
1810  }
1811 
1812  ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1813 out:
1814  vmw_resource_unreference(&res);
1815  return ret;
1816 }
1817 
1818 int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1819  struct drm_file *file_priv)
1820 {
1821  struct vmw_private *dev_priv = vmw_priv(dev);
1822  struct vmw_user_stream *stream;
1823  struct vmw_resource *res;
1824  struct vmw_resource *tmp;
1825  struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1826  struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1827  struct vmw_master *vmaster = vmw_master(file_priv->master);
1828  int ret;
1829 
1830  /*
1831  * Approximate idr memory usage with 128 bytes. It will be limited
1832  * by the maximum number of streams anyway.
1833  */
1834 
1835  if (unlikely(vmw_user_stream_size == 0))
1836  vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
1837 
1838  ret = ttm_read_lock(&vmaster->lock, true);
1839  if (unlikely(ret != 0))
1840  return ret;
1841 
1842  ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1843  vmw_user_stream_size,
1844  false, true);
1845  if (unlikely(ret != 0)) {
1846  if (ret != -ERESTARTSYS)
1847  DRM_ERROR("Out of graphics memory for stream"
1848  " creation.\n");
1849  goto out_unlock;
1850  }
1851 
1852 
1853  stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1854  if (unlikely(stream == NULL)) {
1855  ttm_mem_global_free(vmw_mem_glob(dev_priv),
1856  vmw_user_stream_size);
1857  ret = -ENOMEM;
1858  goto out_unlock;
1859  }
1860 
1861  res = &stream->stream.res;
1862  stream->base.shareable = false;
1863  stream->base.tfile = NULL;
1864 
1865  /*
1866  * From here on, the destructor takes over resource freeing.
1867  */
1868 
1869  ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1870  if (unlikely(ret != 0))
1871  goto out_unlock;
1872 
1873  tmp = vmw_resource_reference(res);
1874  ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1875  &vmw_user_stream_base_release, NULL);
1876 
1877  if (unlikely(ret != 0)) {
1878  vmw_resource_unreference(&tmp);
1879  goto out_err;
1880  }
1881 
1882  arg->stream_id = res->id;
1883 out_err:
1884  vmw_resource_unreference(&res);
1885 out_unlock:
1886  ttm_read_unlock(&vmaster->lock);
1887  return ret;
1888 }
1889 
1890 int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1891  struct ttm_object_file *tfile,
1892  uint32_t *inout_id, struct vmw_resource **out)
1893 {
1894  struct vmw_user_stream *stream;
1895  struct vmw_resource *res;
1896  int ret;
1897 
1898  res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
1899  if (unlikely(res == NULL))
1900  return -EINVAL;
1901 
1902  if (res->res_free != &vmw_user_stream_free) {
1903  ret = -EINVAL;
1904  goto err_ref;
1905  }
1906 
1907  stream = container_of(res, struct vmw_user_stream, stream.res);
1908  if (stream->base.tfile != tfile) {
1909  ret = -EPERM;
1910  goto err_ref;
1911  }
1912 
1913  *inout_id = stream->stream.stream_id;
1914  *out = res;
1915  return 0;
1916 err_ref:
1917  vmw_resource_unreference(&res);
1918  return ret;
1919 }
1920 
1921 
1922 int vmw_dumb_create(struct drm_file *file_priv,
1923  struct drm_device *dev,
1924  struct drm_mode_create_dumb *args)
1925 {
1926  struct vmw_private *dev_priv = vmw_priv(dev);
1927  struct vmw_master *vmaster = vmw_master(file_priv->master);
1928  struct vmw_user_dma_buffer *vmw_user_bo;
1929  struct ttm_buffer_object *tmp;
1930  int ret;
1931 
1932  args->pitch = args->width * ((args->bpp + 7) / 8);
1933  args->size = args->pitch * args->height;
1934 
1935  vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
1936  if (vmw_user_bo == NULL)
1937  return -ENOMEM;
1938 
1939  ret = ttm_read_lock(&vmaster->lock, true);
1940  if (ret != 0) {
1941  kfree(vmw_user_bo);
1942  return ret;
1943  }
1944 
1945  ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
1946  &vmw_vram_sys_placement, true,
1947  &vmw_user_dmabuf_destroy);
1948  if (ret != 0)
1949  goto out_no_dmabuf;
1950 
1951  tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
1952  ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
1953  &vmw_user_bo->base,
1954  false,
1955  ttm_buffer_type,
1956  &vmw_user_dmabuf_release, NULL);
1957  if (unlikely(ret != 0))
1958  goto out_no_base_object;
1959 
1960  args->handle = vmw_user_bo->base.hash.key;
1961 
1962 out_no_base_object:
1963  ttm_bo_unref(&tmp);
1964 out_no_dmabuf:
1965  ttm_read_unlock(&vmaster->lock);
1966  return ret;
1967 }
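/*
 * Illustrative sketch (not part of the original file): the dumb-buffer
 * layout computed by vmw_dumb_create() above. The pitch is the width times
 * the byte-rounded bpp, and the size is pitch * height; for example a
 * 1024x768 buffer at 32 bpp gets a pitch of 4096 bytes and a size of
 * 3145728 bytes. The helper name vmw_example_dumb_size() is hypothetical.
 */
static inline uint64_t vmw_example_dumb_size(uint32_t width, uint32_t height,
					     uint32_t bpp, uint32_t *pitch)
{
	*pitch = width * ((bpp + 7) / 8);	/* bytes per scanline */
	return (uint64_t)*pitch * height;	/* total buffer size  */
}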
1968 
1969 int vmw_dumb_map_offset(struct drm_file *file_priv,
1970  struct drm_device *dev, uint32_t handle,
1971  uint64_t *offset)
1972 {
1973  struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1974  struct vmw_dma_buffer *out_buf;
1975  int ret;
1976 
1977  ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
1978  if (ret != 0)
1979  return -EINVAL;
1980 
1981  *offset = out_buf->base.addr_space_offset;
1982  vmw_dmabuf_unreference(&out_buf);
1983  return 0;
1984 }
1985 
1986 int vmw_dumb_destroy(struct drm_file *file_priv,
1987  struct drm_device *dev,
1988  uint32_t handle)
1989 {
1990  return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1991  handle, TTM_REF_USAGE);
1992 }