/*
 * Half the range of the 32-bit fence sequence-number space.  A fence is
 * considered signaled when (passed_seqno - fence_seqno) < VMW_FENCE_WRAP,
 * which handles u32 wrap-around correctly only in unsigned arithmetic.
 *
 * Use an unsigned literal: (1 << 31) left-shifts into the sign bit of a
 * signed int, which is undefined behavior (ISO C11 6.5.7); (1u << 31) is
 * well-defined and matches the u32 seqno type used throughout.
 */
#define VMW_FENCE_WRAP (1u << 31)
75 struct drm_pending_event *
event;
105 static void vmw_fence_obj_destroy_locked(
struct kref *
kref)
111 unsigned int num_fences;
113 list_del_init(&fence->
head);
115 spin_unlock_irq(&fman->
lock);
121 spin_lock_irq(&fman->
lock);
140 INIT_LIST_HEAD(&
list);
143 spin_lock_irq(&fman->
lock);
146 spin_unlock_irq(&fman->
lock);
154 if (list_empty(&
list))
164 list_del_init(&action->
head);
195 unsigned long irq_flags;
201 lists_empty = list_empty(&fman->
fence_list) &&
203 spin_unlock_irqrestore(&fman->
lock, irq_flags);
215 unsigned long irq_flags;
216 unsigned int num_fences;
224 kref_init(&fence->
kref);
237 spin_unlock_irqrestore(&fman->
lock, irq_flags);
247 kref_get(&fence->
kref);
268 spin_lock_irq(&fman->
lock);
270 kref_put(&fence->
kref, vmw_fence_obj_destroy_locked);
271 spin_unlock_irq(&fman->
lock);
280 list_del_init(&action->
head);
320 fifo_mem = fman->
dev_priv->mmio_virt;
354 static bool vmw_fence_goal_check_locked(
struct vmw_fence_obj *fence)
362 fifo_mem = fence->
fman->dev_priv->mmio_virt;
369 fence->
fman->seqno_valid =
true;
388 list_del_init(&fence->
head);
390 INIT_LIST_HEAD(&action_list);
399 needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
403 spin_unlock_irqrestore(&fman->
lock, flags);
413 if (new_seqno != seqno) {
424 unsigned long irq_flags;
429 spin_unlock_irqrestore(&fman->
lock, irq_flags);
432 if ((signaled & flags) == flags)
440 spin_unlock_irqrestore(&fman->
lock, irq_flags);
442 return ((signaled & flags) == flags);
447 bool interruptible,
unsigned long timeout)
518 ret = vmw_fence_obj_init(fman, fence, seqno, mask,
534 static void vmw_user_fence_destroy(
struct vmw_fence_obj *fence)
548 static void vmw_user_fence_base_release(
struct ttm_base_object **p_base)
582 ufence = kzalloc(
sizeof(*ufence),
GFP_KERNEL);
588 ret = vmw_fence_obj_init(fman, &ufence->
fence, seqno,
589 mask, vmw_user_fence_destroy);
602 &vmw_user_fence_base_release,
NULL);
613 *p_fence = &ufence->
fence;
614 *p_handle = ufence->
base.hash.key;
618 tmp = &ufence->
fence;
632 unsigned long irq_flags;
647 kref_get(&fence->
kref);
648 spin_unlock_irq(&fman->
lock);
655 list_del_init(&fence->
head);
657 INIT_LIST_HEAD(&action_list);
664 spin_lock_irq(&fman->
lock);
667 kref_put(&fence->
kref, vmw_fence_obj_destroy_locked);
669 spin_unlock_irqrestore(&fman->
lock, irq_flags);
674 unsigned long irq_flags;
678 spin_unlock_irqrestore(&fman->
lock, irq_flags);
683 struct drm_file *file_priv)
699 wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
700 (wait_timeout >> 26);
711 (
unsigned long)arg->
handle);
742 struct drm_file *file_priv)
756 (
unsigned long)arg->
handle);
764 spin_lock_irq(&fman->
lock);
768 spin_unlock_irq(&fman->
lock);
777 struct drm_file *file_priv)
804 struct drm_pending_event *
event;
805 unsigned long irq_flags;
809 if (list_empty(event_list))
815 event = eaction->
event;
817 spin_unlock_irqrestore(&fman->
lock, irq_flags);
818 event->destroy(event);
821 spin_unlock_irqrestore(&fman->
lock, irq_flags);
836 static void vmw_event_fence_action_seq_passed(
struct vmw_fence_action *action)
841 struct drm_pending_event *
event = eaction->
event;
842 struct drm_file *file_priv;
843 unsigned long irq_flags;
848 file_priv =
event->file_priv;
855 *eaction->
tv_sec = tv.tv_sec;
856 *eaction->
tv_usec = tv.tv_usec;
863 spin_unlock_irqrestore(&dev->event_lock, irq_flags);
880 unsigned long irq_flags;
884 spin_unlock_irqrestore(&fman->
lock, irq_flags);
904 unsigned long irq_flags;
905 bool run_update =
false;
914 INIT_LIST_HEAD(&action_list);
924 run_update = vmw_fence_goal_check_locked(fence);
927 spin_unlock_irqrestore(&fman->
lock, irq_flags);
957 struct drm_pending_event *
event,
965 unsigned long irq_flags;
967 eaction = kzalloc(
sizeof(*eaction),
GFP_KERNEL);
973 eaction->
action.seq_passed = vmw_event_fence_action_seq_passed;
974 eaction->
action.cleanup = vmw_event_fence_action_cleanup;
984 spin_unlock_irqrestore(&fman->
lock, irq_flags);
1004 unsigned long irq_flags;
1009 ret = (file_priv->event_space <
sizeof(
event->event)) ? -
EBUSY : 0;
1011 file_priv->event_space -=
sizeof(event->
event);
1013 spin_unlock_irqrestore(&dev->event_lock, irq_flags);
1016 DRM_ERROR(
"Failed to allocate event space for this file.\n");
1023 DRM_ERROR(
"Failed to allocate an event.\n");
1029 event->event.base.length =
sizeof(*event);
1032 event->base.event = &
event->event.base;
1033 event->base.file_priv = file_priv;
1034 event->base.destroy = (
void (*) (
struct drm_pending_event *))
kfree;
1040 &event->
event.tv_sec,
1041 &event->
event.tv_usec,
1053 event->base.destroy(&event->
base);
1056 file_priv->event_space +=
sizeof(*event);
1057 spin_unlock_irqrestore(&dev->event_lock, irq_flags);
1063 struct drm_file *file_priv)
1086 DRM_ERROR(
"Fence event invalid fence object handle "
1088 (
unsigned long)arg->
handle);
1095 if (user_fence_rep !=
NULL) {
1101 DRM_ERROR(
"Failed to reference a fence "
1103 goto out_no_ref_obj;
1105 handle = base->
hash.key;
1119 DRM_ERROR(
"Fence event failed to create fence.\n");
1139 DRM_ERROR(
"Failed to attach event to fence.\n");
1148 if (user_fence_rep !=
NULL)