30 #include <drm/i915_drm.h>
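/*
 * The lines below (108-237) are from i915_gem_execbuffer_relocate_entry(),
 * which applies a single relocation: it looks up the target object,
 * validates the requested read/write domains and the relocation offset,
 * then patches the batch with the target's GTT offset, either through the
 * CPU mapping (use_cpu_reloc) or an atomic GTT mapping
 * (io_mapping_map_atomic_wc).
 */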
108 struct drm_gem_object *target_obj;
135 DRM_DEBUG("No GTT space found for object %d\n",
142 DRM_DEBUG("reloc with multiple write domains: "
143 "obj %p target %d offset %d "
144 "read %08x write %08x",
153 DRM_DEBUG("reloc with read/write non-GPU domains: "
154 "obj %p target %d offset %d "
155 "read %08x write %08x",
163 reloc->write_domain != target_obj->pending_write_domain)) {
164 DRM_DEBUG("Write domain conflict: "
165 "obj %p target %d offset %d "
166 "new %08x old %08x\n",
170 target_obj->pending_write_domain);
174 target_obj->pending_read_domains |= reloc->read_domains;
175 target_obj->pending_write_domain |= reloc->write_domain;
185 DRM_DEBUG("Relocation beyond object bounds: "
186 "obj %p target %d offset %d size %d.\n",
189 (int) obj->base.size);
193 DRM_DEBUG("Relocation not 4-byte aligned: "
194 "obj %p target %d offset %d.\n",
204 reloc->delta += target_offset;
205 if (use_cpu_reloc(obj)) {
232 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
237 io_mapping_unmap_atomic(reloc_page);
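/*
 * Lines 250-326: per-object relocation.  N_RELOC() sizes a stack buffer of
 * relocation entries; i915_gem_execbuffer_relocate_object() streams the
 * user relocations through that buffer, the _slow variant works on a
 * prevalidated kernel copy, and i915_gem_execbuffer_relocate() walks every
 * object in the execbuffer list.
 */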
250 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
272 ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
301 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
310 i915_gem_execbuffer_relocate(struct drm_device *dev,
326 ret = i915_gem_execbuffer_relocate_object(obj, eb);
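/*
 * Lines 335-403: per-object reservation.  __EXEC_OBJECT_HAS_PIN and
 * __EXEC_OBJECT_HAS_FENCE are kept in the exec entry flags so that
 * i915_gem_execbuffer_unreserve_object() releases exactly the pin and
 * fence that i915_gem_execbuffer_reserve_object() acquired; objects that
 * need fencing or relocation through the aperture are pinned mappable.
 */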
335 #define __EXEC_OBJECT_HAS_PIN (1<<31)
336 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
352 bool need_fence, need_mappable;
356 has_fenced_gpu_access &&
359 need_mappable = need_fence || need_reloc_mappable(obj);
367 if (has_fenced_gpu_access) {
373 if (i915_gem_object_pin_fence(obj))
403 i915_gem_object_unpin_fence(obj);
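/*
 * Lines 413-499: i915_gem_execbuffer_reserve().  Objects needing a fence
 * or a mappable GTT placement are moved to the front of a temporary
 * ordered list, then every object is pinned via
 * i915_gem_execbuffer_reserve_object(); if binding fails, the
 * reservations taken so far are dropped and the pass is retried after
 * eviction.
 */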
413 struct drm_file *file,
421 INIT_LIST_HEAD(&ordered_objects);
422 while (!list_empty(objects)) {
424 bool need_fence, need_mappable;
432 has_fenced_gpu_access &&
435 need_mappable = need_fence || need_reloc_mappable(obj);
438 list_move(&obj->exec_list, &ordered_objects);
440 list_move_tail(&obj->exec_list, &ordered_objects);
442 obj->base.pending_read_domains = 0;
443 obj->base.pending_write_domain = 0;
446 list_splice(&ordered_objects, objects);
467 bool need_fence, need_mappable;
473 has_fenced_gpu_access &&
476 need_mappable = need_fence || need_reloc_mappable(obj);
482 ret = i915_gem_execbuffer_reserve_object(obj, ring);
492 ret = i915_gem_execbuffer_reserve_object(obj, ring);
499 i915_gem_execbuffer_unreserve_object(obj);
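/*
 * Lines 525-609: i915_gem_execbuffer_relocate_slow(), the fallback taken
 * when the fast relocation path faults on user memory.  It releases the
 * object references, copies every relocation entry into one kernel buffer
 * (reloc_offset[] records each object's slice), re-looks-up and
 * re-reserves the objects, then relocates from the kernel copy.
 */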
525 while (!list_empty(objects)) {
530 drm_gem_object_unreference(&obj->base);
536 for (i = 0; i < count; i++)
537 total += exec[i].relocation_count;
539 reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
540 reloc = drm_malloc_ab(total, sizeof(*reloc));
541 if (reloc == NULL || reloc_offset == NULL) {
542 drm_free_large(reloc);
543 drm_free_large(reloc_offset);
549 for (i = 0; i < count; i++) {
555 exec[i].relocation_count * sizeof(*reloc))) {
561 reloc_offset[i] = total;
562 total += exec[i].relocation_count;
573 for (i = 0; i < count; i++) {
577 DRM_DEBUG("Invalid object handle %d at index %d\n",
586 eb_add_object(eb, obj);
589 ret = i915_gem_execbuffer_reserve(ring, file, objects);
595 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
596 reloc + reloc_offset[offset]);
608 drm_free_large(reloc);
609 drm_free_large(reloc_offset);
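/*
 * Lines 624-638: i915_gem_execbuffer_wait_for_flips() emits an
 * MI_WAIT_FOR_EVENT (padded with MI_NOOP) for each display plane that
 * still has a page flip pending, so the batch is not executed before the
 * flip completes.
 */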
624 for (plane = 0; flips >> plane; plane++) {
625 if (((flips >> plane) & 1) == 0)
638 intel_ring_emit(ring, MI_NOOP);
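/*
 * Lines 662-669: i915_gem_execbuffer_move_to_gpu() gathers the pending
 * flips and write domains of all objects, waits for the flips and
 * flushes/invalidates caches as needed before the batch is dispatched.
 */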
662 if (obj->base.pending_write_domain)
665 flush_domains |= obj->base.write_domain;
669 ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
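/*
 * Lines 698-716: exec-list validation.  Each entry's relocation_count is
 * checked for overflow and the user relocation array is prefaulted
 * (fault_in_multipages_readable) so that the later copies are unlikely to
 * fault while struct_mutex is held.
 */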
698 for (i = 0; i < count; i++) {
703 if (exec[i].relocation_count >
716 if (fault_in_multipages_readable(ptr, length))
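/*
 * Lines 724-746: i915_gem_execbuffer_move_to_active() commits the pending
 * read/write domains computed during relocation, marks each object active
 * on the ring and emits a change-domain tracepoint.
 */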
724 i915_gem_execbuffer_move_to_active(struct list_head *objects,
731 u32 old_read = obj->base.read_domains;
732 u32 old_write = obj->base.write_domain;
734 obj->base.read_domains = obj->base.pending_read_domains;
735 obj->base.write_domain = obj->base.pending_write_domain;
739 if (obj->base.write_domain) {
746 trace_i915_gem_object_change_domain(obj, old_read, old_write);
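/*
 * Lines 751-752: i915_gem_execbuffer_retire_commands() adds a request
 * (breadcrumb) behind the batch so that retirement and throttling can
 * track its completion.
 */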
751 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
752 struct drm_file *file,
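/*
 * Lines 763-779: i915_reset_gen7_sol_offsets() emits register writes that
 * zero the four GEN7_SO_WRITE_OFFSET registers, used when userspace
 * passes the I915_EXEC_GEN7_SOL_RESET flag.
 */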
763 i915_reset_gen7_sol_offsets(struct drm_device *dev,
776 for (i = 0; i < 4; i++) {
779 intel_ring_emit(ring, 0);
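/*
 * Lines 789-1065: i915_gem_do_execbuffer(), the common execbuffer path.
 * It validates the arguments, picks the ring and constants mode, looks up
 * and reserves every object, relocates (falling back to the slow path on
 * -EFAULT), flushes and synchronises, emits any INSTPM/SOL-reset setup,
 * dispatches the batch, then moves the objects to the active list and
 * retires the commands.
 */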
789 struct drm_file *file,
800 u32 exec_start, exec_len;
805 if (!i915_gem_check_execbuffer(args)) {
806 DRM_DEBUG("execbuf with invalid offset/length\n");
822 DRM_DEBUG("Ring %s doesn't support contexts\n",
830 DRM_DEBUG("Ring %s doesn't support contexts\n",
836 DRM_DEBUG("execbuf with unknown ring: %d\n",
840 if (!intel_ring_initialized(ring)) {
841 DRM_DEBUG("execbuf with invalid ring: %d\n",
852 if (ring == &dev_priv->ring[RCS] &&
867 DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
872 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
877 if (ring != &dev_priv->ring[RCS]) {
878 DRM_DEBUG("clip rectangles are only valid with the render ring\n");
883 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
888 DRM_DEBUG("execbuf with %u cliprects\n",
895 if (cliprects == NULL) {
913 if (dev_priv->mm.suspended) {
927 INIT_LIST_HEAD(&objects);
934 DRM_DEBUG("Invalid object handle %d at index %d\n",
942 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
943 obj, exec[i].handle, i);
951 eb_add_object(eb, obj);
960 ret = i915_gem_execbuffer_reserve(ring, file, &objects);
965 ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
968 ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
972 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
979 if (batch_obj->base.pending_write_domain) {
980 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
986 ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
992 if (seqno < ring->sync_seqno[i]) {
1010 if (ring == &dev_priv->ring[RCS] &&
1016 intel_ring_emit(ring, MI_NOOP);
1018 intel_ring_emit(ring, INSTPM);
1019 intel_ring_emit(ring, mask << 16 | mode);
1026 ret = i915_reset_gen7_sol_offsets(dev, ring);
1031 trace_i915_gem_ring_dispatch(ring, seqno);
1043 exec_start, exec_len);
1053 i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
1054 i915_gem_execbuffer_retire_commands(dev, file, ring);
1058 while (!list_empty(&objects)) {
1065 drm_gem_object_unreference(&obj->base);
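/*
 * Lines 1081-1156: the legacy execbuffer ioctl.  It converts the old
 * exec_object array into exec_object2 entries, calls
 * i915_gem_do_execbuffer() and copies the updated offsets back to
 * userspace.
 */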
1081 struct drm_file *file)
1090 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1095 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1096 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1097 if (exec_list == NULL || exec2_list == NULL) {
1098 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1100 drm_free_large(exec_list);
1101 drm_free_large(exec2_list);
1108 DRM_DEBUG("copy %d exec entries failed %d\n",
1110 drm_free_large(exec_list);
1111 drm_free_large(exec2_list);
1138 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1142 exec_list[i].offset = exec2_list[i].offset;
1149 DRM_DEBUG("failed to copy %d exec entries "
1150 "back to user (%d)\n",
1155 drm_free_large(exec_list);
1156 drm_free_large(exec2_list);
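/*
 * Lines 1162-1209: the execbuffer2 ioctl.  It copies the exec_object2
 * array in from userspace, calls i915_gem_do_execbuffer() and, on
 * success, writes the presumed offsets back to the user array.
 */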
1162 struct drm_file *file)
1170 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1176 if (exec2_list == NULL)
1177 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1179 if (exec2_list == NULL) {
1180 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1185 (struct drm_i915_relocation_entry __user *)
1189 DRM_DEBUG("copy %d exec entries failed %d\n",
1191 drm_free_large(exec2_list);
1195 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1203 DRM_DEBUG("failed to copy %d exec entries "
1204 "back to user (%d)\n",
1209 drm_free_large(exec2_list);