static uint64_t vmw_user_context_size;
static uint64_t vmw_user_surface_size;
static uint64_t vmw_user_stream_size;
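/*
 * Accounting sizes for the user-space visible resource types. They are
 * computed lazily on first use (see the define/claim ioctls below) as
 * ttm_round_pot(sizeof(*obj)) plus a small slack for base-object
 * overhead, and are charged to / refunded from the TTM global memory
 * accounting around object creation and destruction.
 */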
static void vmw_resource_release(struct kref *kref)
	kref_put(&res->kref, vmw_resource_release);
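/*
 * Resources are reference-counted via the embedded kref: the kref_put()
 * above funnels the final unreference into vmw_resource_release(), the
 * single place where a resource is torn down.
 */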
static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
				 struct vmw_resource *res)
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     bool delay_id,
			     void (*res_free) (struct vmw_resource *res),
			     void (*remove_from_lists)
			     (struct vmw_resource *res))
{
	kref_init(&res->kref);
	return vmw_resource_alloc_id(dev_priv, res);
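/*
 * Note the split between resource initialization and id allocation:
 * contexts and streams get their device-visible id here at init time,
 * while surfaces (initialized with a delayed id below) only receive one
 * when they are first validated.
 */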
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	/* ... */
	if (res && res->avail)
		kref_get(&res->kref);
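/*
 * Lookup succeeds only for resources whose ->avail flag has been set,
 * i.e. fully constructed objects; the kref_get() hands the caller its
 * own reference.
 */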
static void vmw_hw_context_destroy(struct vmw_resource *res)
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	/* ... */
	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, false, res_free, NULL);
		DRM_ERROR("Failed to allocate a resource id.\n");
	/* ... */
		DRM_ERROR("Out of hw context ids.\n");
	/* ... */
		DRM_ERROR("Fifo reserve failed.\n");
	/* ... */
	vmw_resource_activate(res, vmw_hw_context_destroy);
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);

/* ... */

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
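/*
 * Ownership convention: once vmw_context_init() has been entered, error
 * paths dispose of the object through res_free (or kfree() when
 * res_free is NULL), so the caller above just maps a nonzero return to
 * NULL without freeing anything itself.
 */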
static void vmw_user_context_free(struct vmw_resource *res)
{
	/* ... */
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	/* ... */
	if (res->res_free != &vmw_user_context_free) {
	/* ... */
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	/* ... */
	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
	/* ... */
	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	/* ... */
		DRM_ERROR("Out of graphics memory for context"
			  " creation.\n");
	/* ... */
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
	/* ... */
	ctx->base.shareable = false;
	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	/* ... */
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);
	if (res && res->avail) {
		/* ... */
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
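/*
 * Permission model: a context that is not base.shareable may only be
 * referenced through the ttm_object_file (tfile) that created it; the
 * destroy ioctl above and this availability check both enforce it.
 */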
static const struct vmw_bpp vmw_sf_bpp[] = {
static inline uint32_t vmw_surface_destroy_size(void)
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	/* ... */
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset = stride * cur_size->height *
			cur_size->depth * bpp / stride_bpp;
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
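/*
 * The DMA suffix bounds the transfer: maximumOffset is the byte size of
 * the mip level, i.e. scanline stride times height and depth, with the
 * bpp / stride_bpp ratio (from the vmw_sf_bpp table above) correcting
 * for formats whose storage width differs from their nominal bit depth.
 */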
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	/* ... */
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
	/* ... */
	vmw_surface_destroy_encode(res->id, cmd);
	INIT_LIST_HEAD(&val_list);
	val_buf.bo = ttm_bo_reference(srf->backup);
	/* ... */
		goto out_no_validate;
	ret = vmw_resource_alloc_id(dev_priv, res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
	submit_size = vmw_surface_define_size(srf);
	if (srf->backup)
		submit_size += vmw_surface_dma_size(srf);
	/* ... */
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "validation.\n");
	/* ... */
	vmw_surface_define_encode(srf, cmd);
	if (srf->backup) {
		/* ... */
		cmd += vmw_surface_define_size(srf);
		vmw_surface_dma_encode(srf, cmd, &ptr, true);
	}
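/*
 * Validation therefore (re)defines the hardware surface and, if a
 * backup buffer object holds previously evicted contents, appends a DMA
 * that uploads them into the new surface (to_surface == true).
 */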
	vmw_resource_release_id(res);
	INIT_LIST_HEAD(&val_list);
	val_buf.bo = ttm_bo_reference(srf->backup);
	/* ... */
		goto out_no_validate;
	submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
	/* ... */
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
	/* ... */
	vmw_surface_dma_encode(srf, cmd, &ptr, false);
	cmd += vmw_surface_dma_size(srf);
	vmw_surface_destroy_encode(res->id, cmd);
	vmw_resource_release_id(res);
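/*
 * Eviction mirrors validation: DMA the contents out to the backup bo
 * (to_surface == false), destroy the hardware surface, and release the
 * surface id.
 */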
		DRM_ERROR("Out of device memory for surfaces.\n");
	/* ... */
		evict_srf = vmw_surface_reference
			(list_first_entry(&dev_priv->surface_lru,
					  struct vmw_surface,
					  lru_head));
		list_del_init(&evict_srf->lru_head);
	/* ... */
		vmw_surface_unreference(&evict_srf);
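/*
 * When the device runs out of surface memory, the least recently used
 * surface is unlinked from the LRU list and evicted; the reference
 * taken via vmw_surface_reference() keeps it alive until the
 * vmw_surface_unreference() that follows the eviction.
 */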
static void vmw_surface_remove_from_lists(struct vmw_resource *res)
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, true, res_free,
				vmw_surface_remove_from_lists);
	/* ... */
	vmw_resource_activate(res, vmw_hw_surface_destroy);
static void vmw_user_surface_free(struct vmw_resource *res)

/* ... */

		     res->res_free != &vmw_user_surface_free)
	/* ... */
	lock = &res->dev_priv->resource_lock;
	BUG_ON(*out_surf || *out_buf);

/* ... */

		goto out_bad_resource;
	/* ... */
	srf = &user_srf->srf;
	/* ... */
	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		/* ... */
		goto out_bad_resource;
	}

	kref_get(&res->kref);
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	/* ... */
	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;
	/* ... */
	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;
	/* ... */
	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
	/* ... */
		DRM_ERROR("Out of graphics memory for surface"
			  " creation.\n");
		goto out_no_user_srf;
	/* ... */
	srf = &user_srf->srf;
	/* ... */
		goto out_no_offsets;
	/* ... */
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;
	/* ... */
	cur_size = srf->sizes;
	/* ... */
			uint32_t stride =
				(cur_size->width * stride_bpp + 7) >> 3;

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += stride * cur_size->height *
				cur_size->depth * bpp / stride_bpp;
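/*
 * Each face/mip pair gets a vmw_surface_offset record so DMA can
 * address individual mip levels inside the backing bo; cur_bo_offset
 * accumulates each level's byte size with the same
 * stride * height * depth * bpp / stride_bpp arithmetic used by
 * vmw_surface_dma_encode().
 */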
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {
		/* ... */
			DRM_ERROR("Failed to allocate cursor_image\n");
	user_srf->base.shareable = false;
	/* ... */
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);
	/* ... */
	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	/* ... */
		DRM_ERROR("Could not find surface to reference.\n");
	/* ... */
		goto out_bad_resource;
	/* ... */
	srf = &user_srf->srf;
	/* ... */
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	/* ... */
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;
	/* ... */
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		goto out_bad_surface;
	/* ... */
	*id = user_srf->srf.res.id;
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	/* ... */
	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	bo = &vmw_user_bo->dma.base;
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	/* ... */
	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	/* ... */
	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	/* ... */
	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base, false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)

/* ... */

		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
	/* ... */
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
	/* ... */
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	/* ... */
	*out = &vmw_user_bo->dma;
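/*
 * This lookup resolves a user handle to a vmw_dma_buffer: handles whose
 * base object is not of ttm_buffer_type are rejected, and on success an
 * extra bo reference is taken that the caller must drop with
 * vmw_dmabuf_unreference().
 */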
static void vmw_stream_destroy(struct vmw_resource *res)
{
	/* ... */
	DRM_INFO("%s: unref\n", __func__);
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	/* ... */
	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, false, res_free, NULL);
	/* ... */
	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
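/*
 * Overlay streams reuse the same init/activate pattern as contexts and
 * surfaces; unlike those, stream setup apparently needs no FIFO
 * commands here, and the DRM_INFO calls simply trace claim and unref.
 */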
static void vmw_user_stream_free(struct vmw_resource *res)
{
	/* ... */
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	/* ... */
	if (res->res_free != &vmw_user_stream_free) {
	/* ... */
	if (stream->base.tfile != tfile) {
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	/* ... */
	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
	/* ... */
	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	/* ... */
		DRM_ERROR("Out of graphics memory for stream"
			  " allocation.\n");
	/* ... */
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
	/* ... */
	res = &stream->stream.res;
	stream->base.shareable = false;
	/* ... */
	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	/* ... */
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);
	if (res->res_free != &vmw_user_stream_free) {
	/* ... */
	if (stream->base.tfile != tfile) {
	/* ... */
	*inout_id = stream->stream.stream_id;
	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (vmw_user_bo == NULL)
		return -ENOMEM;
	/* ... */
	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	/* ... */
	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base, false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;
	*offset = out_buf->base.addr_space_offset;
	vmw_dmabuf_unreference(&out_buf);
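/*
 * addr_space_offset is the bo's offset in the mmap address space, so
 * user space maps the buffer by passing it straight to mmap() on the
 * DRM fd. A minimal sketch of the consumer side (hypothetical variable
 * names, assuming libdrm and the vmwgfx ioctl structs of this era):
 *
 *	union drm_vmw_alloc_dmabuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.size = size;
 *	drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF, &arg, sizeof(arg));
 *	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.rep.map_handle);
 */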