60 if (gem->import_attach)
73 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
110 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
149 tile_flags, NULL, pnvbo);
160 if (nv_device(drm->device)->card_type >= NV_50)
165 nouveau_bo_ref(NULL, pnvbo);
169 nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
170 nvbo->gem->driver_private = nvbo;
175 nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
179 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
205 struct drm_file *file_priv)
213 drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
216 NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
221 req->info.domain, req->info.tile_mode,
222 req->info.tile_flags, &nvbo);
228 ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
234 drm_gem_object_unreference_unlocked(nvbo->gem);
239 nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
242 struct nouveau_bo *nvbo = gem->driver_private;
245 (write_domains ? write_domains : read_domains);
246 uint32_t pref_flags = 0, valid_flags = 0;
257 if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
261 else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
265 else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
301 drm_gem_object_unreference_unlocked(nvbo->gem);
308 validate_fini_list(&op->vram_list, fence);
309 validate_fini_list(&op->gart_list, fence);
310 validate_fini_list(&op->both_list, fence);
326 if (++trycnt > 100000) {
327 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
331 for (i = 0; i < nr_buffers; i++) {
333 struct drm_gem_object *gem;
339 validate_fini(op, NULL);
342 nvbo = gem->driver_private;
345 NV_ERROR(drm, "multiple instances of buffer %d on "
346 "validation list\n", b->handle);
347 drm_gem_object_unreference_unlocked(gem);
348 validate_fini(op, NULL);
354 validate_fini(op, NULL);
357 drm_gem_object_unreference_unlocked(gem);
379 NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
382 validate_fini(op, NULL);
396 spin_lock(&nvbo->bo.bdev->fence_lock);
397 if (nvbo->bo.sync_obj)
399 spin_unlock(&nvbo->bo.bdev->fence_lock);
422 ret = validate_sync(chan, nvbo);
424 NV_ERROR(drm, "fail pre-validate sync\n");
439 NV_ERROR(drm, "fail ttm_validate\n");
443 ret = validate_sync(chan, nvbo);
445 NV_ERROR(drm, "fail post-validate sync\n");
449 if (nv_device(drm->device)->card_type < NV_50) {
452 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
454 b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
476 struct drm_file *file_priv,
478 uint64_t user_buffers, int nr_buffers,
491 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
498 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
501 NV_ERROR(drm, "validate vram_list\n");
502 validate_fini(op, NULL);
507 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
510 NV_ERROR(drm, "validate gart_list\n");
511 validate_fini(op, NULL);
516 ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
519 NV_ERROR(drm, "validate both_list\n");
520 validate_fini(op, NULL);
525 *apply_relocs = relocs;
548 nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
559 return PTR_ERR(reloc);
568 NV_ERROR(drm, "reloc bo index invalid\n");
578 NV_ERROR(drm, "reloc container bo index invalid\n");
586 NV_ERROR(drm, "reloc outside of bo\n");
591 if (!nvbo->kmap.virtual) {
595 NV_ERROR(drm, "failed kmap for reloc\n");
610 if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
616 spin_lock(&nvbo->bo.bdev->fence_lock);
618 spin_unlock(&nvbo->bo.bdev->fence_lock);
620 NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
633 struct drm_file *file_priv)
644 int i, j, ret = 0, do_reloc = 0;
665 NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
671 NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
677 NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
682 push = u_memcpya(req->push, req->nr_push, sizeof(*push));
693 for (i = 0; i < req->nr_push; i++) {
695 NV_ERROR(drm, "push %d buffer not in list\n", i);
702 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
706 NV_ERROR(drm, "validate: %d\n", ret);
712 ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
714 NV_ERROR(drm, "reloc apply: %d\n", ret);
719 if (chan->dma.ib_max) {
722 NV_ERROR(drm, "nv50cal_space: %d\n", ret);
726 for (i = 0; i < req->nr_push; i++) {
727 struct nouveau_bo *nvbo = (void *)(unsigned long)
734 if (nv_device(drm->device)->chipset >= 0x25) {
735 ret = RING_SPACE(chan, req->nr_push * 2);
737 NV_ERROR(drm, "cal_space: %d\n", ret);
741 for (i = 0; i < req->nr_push; i++) {
742 struct nouveau_bo *nvbo = (void *)(unsigned long)
751 NV_ERROR(drm, "jmp_space: %d\n", ret);
755 for (i = 0; i < req->nr_push; i++) {
756 struct nouveau_bo *nvbo = (void *)(unsigned long)
760 cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
763 if (!nvbo->kmap.virtual) {
776 push[i].length - 8) / 4, cmd);
780 (nvbo->bo.offset + push[i].offset));
789 NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
795 validate_fini(&op, fence);
803 if (chan->dma.ib_max) {
807 if (nv_device(drm->device)->chipset >= 0x25) {
812 (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
824 if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
826 if (domain & NOUVEAU_GEM_DOMAIN_GART)
834 struct drm_file *file_priv)
837 struct drm_gem_object *gem;
845 nvbo = nouveau_gem_object(gem);
847 spin_lock(&nvbo->bo.bdev->fence_lock);
849 spin_unlock(&nvbo->bo.bdev->fence_lock);
850 drm_gem_object_unreference_unlocked(gem);
856 struct drm_file *file_priv)
863 struct drm_file *file_priv)
866 struct drm_gem_object *gem;
873 ret = nouveau_gem_info(file_priv, gem, req);
874 drm_gem_object_unreference_unlocked(gem);