Linux Kernel
3.7.1
Main Page
Related Pages
Modules
Namespaces
Data Structures
Files
File List
Globals
All
Data Structures
Namespaces
Files
Functions
Variables
Typedefs
Enumerations
Enumerator
Macros
Groups
Pages
drivers
gpu
drm
vmwgfx
vmwgfx_drv.h
Go to the documentation of this file.
1
/**************************************************************************
2
*
3
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4
* All Rights Reserved.
5
*
6
* Permission is hereby granted, free of charge, to any person obtaining a
7
* copy of this software and associated documentation files (the
8
* "Software"), to deal in the Software without restriction, including
9
* without limitation the rights to use, copy, modify, merge, publish,
10
* distribute, sub license, and/or sell copies of the Software, and to
11
* permit persons to whom the Software is furnished to do so, subject to
12
* the following conditions:
13
*
14
* The above copyright notice and this permission notice (including the
15
* next paragraph) shall be included in all copies or substantial portions
16
* of the Software.
17
*
18
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24
* USE OR OTHER DEALINGS IN THE SOFTWARE.
25
*
26
**************************************************************************/
27
28
#ifndef _VMWGFX_DRV_H_
29
#define _VMWGFX_DRV_H_
30
31
#include "
vmwgfx_reg.h
"
32
#include <
drm/drmP.h
>
33
#include <
drm/vmwgfx_drm.h
>
34
#include <
drm/drm_hashtab.h
>
35
#include <
linux/suspend.h
>
36
#include <
drm/ttm/ttm_bo_driver.h
>
37
#include <
drm/ttm/ttm_object.h
>
38
#include <
drm/ttm/ttm_lock.h
>
39
#include <
drm/ttm/ttm_execbuf_util.h
>
40
#include <
drm/ttm/ttm_module.h
>
41
#include "
vmwgfx_fence.h
"
42
43
/* Driver version/date reported to userspace via the DRM version ioctl. */
#define VMWGFX_DRIVER_DATE "20120209"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
/* Start offset for TTM buffer-object mmap offsets in the file address space. */
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
/* Size of the static bounce buffer used for FIFO command submission. */
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
/* Per-submission limits; these size the arrays in struct vmw_sw_context. */
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
/* Initial size of the command bounce buffer; grown on demand. */
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768

/* Map the driver-private TTM placement slot to GMR (Guest Memory Region). */
#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0

/* Per-resource-type ttm_object_type values used for user-space handles. */
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
61
62
/**
 * struct vmw_fpriv - Per-open-file private data for the vmwgfx driver.
 * @locked_master: Master this file has locked, if any.
 * @tfile: TTM object file used for handle lookups on behalf of this client.
 * @fence_events: List of pending fence events for this file.
 */
struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
};
67
68
/**
 * struct vmw_dma_buffer - vmwgfx wrapper around a TTM buffer object.
 * @base: Embedded TTM buffer object; the vmw_dma_buffer is freed with it.
 * @validate_list: Entry on a submission validation list.
 * @gmr_bound: Whether the buffer is currently bound to a GMR.
 * @cur_validate_node: Index of this buffer on the current validate list.
 * @on_validate_list: Whether @cur_validate_node is valid.
 */
struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head validate_list;
	bool gmr_bound;
	uint32_t cur_validate_node;
	bool on_validate_list;
};
75
76
/**
 * struct vmw_resource - Base class for device resources (contexts, surfaces,
 * streams) that have both a device-side and a user-space handle identity.
 * @kref: Reference count; release frees via @res_free.
 * @dev_priv: Back-pointer to the owning device.
 * @idr: The idr this resource's @id lives in (context/surface/stream idr).
 * @id: Resource id inside @idr; also the device-visible id.
 * @res_type: TTM object type used for the user-space handle.
 * @avail: True once the resource is visible to lookups.
 * @remove_from_lists: Hook to unlink the resource from driver lists.
 * @hw_destroy: Hook that destroys the device-side object.
 * @res_free: Destructor called when the last reference is dropped.
 * @validate_head: Entry on a submission validation list.
 * @query_head: Entry on the query list; protected by the cmdbuf mutex.
 */
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	struct idr *idr;
	int id;
	enum ttm_object_type res_type;
	bool avail;
	void (*remove_from_lists) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
	void (*res_free) (struct vmw_resource *res);
	struct list_head validate_head;
	struct list_head query_head; /* Protected by the cmdbuf mutex */
	/* TODO is a generic snooper needed? */
#if 0
	void (*snoop)(struct vmw_resource *res,
		      struct ttm_object_file *tfile,
		      SVGA3dCmdHeader *header);
	void *snoop_priv;
#endif
};
96
97
/**
 * struct vmw_cursor_snooper - State for snooping cursor image updates
 * out of the command stream so KMS can mirror the legacy cursor.
 * @crtc: CRTC whose cursor is being snooped.
 * @age: Generation counter of the snooped image.
 * @image: Snooped ARGB cursor image, or NULL if none captured.
 */
struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};
102
103
struct
vmw_framebuffer
;
104
struct
vmw_surface_offset
;
105
106
/**
 * struct vmw_surface - An SVGA3D surface resource.
 * @res: Embedded base resource (refcounting, id, destructors).
 * @lru_head: Entry on the surface LRU; protected by the resource lock.
 * @flags: SVGA3D surface flags as given at creation.
 * @format: SVGA3D surface format.
 * @mip_levels: Number of mip levels per face.
 * @sizes: Array of per-mip sizes, @num_sizes entries.
 * @num_sizes: Total number of mip sizes across all faces.
 * @scanout: True if the surface may be used for scanout.
 * @snooper: Cursor-snooping state for this surface.
 * @backup: Backing buffer used when the surface is swapped out of
 *          device memory, or NULL.
 * @offsets: Per-mip byte offsets into @backup.
 * @backup_size: Required size of @backup in bytes.
 */
struct vmw_surface {
	struct vmw_resource res;
	struct list_head lru_head; /* Protected by the resource lock */
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;

	bool scanout;

	/* TODO so far just a extra pointer */
	struct vmw_cursor_snooper snooper;
	struct ttm_buffer_object *backup;
	struct vmw_surface_offset *offsets;
	uint32_t backup_size;
};
123
124
/**
 * struct vmw_marker_queue - Queue of in-flight fifo markers used to
 * estimate command-stream lag for throttling.
 * @head: List of outstanding markers.
 * @lag: Accumulated estimated lag.
 * @lag_time: Time at which @lag was last updated.
 * @lock: Protects all members.
 */
struct vmw_marker_queue {
	struct list_head head;
	struct timespec lag;
	struct timespec lag_time;
	spinlock_t lock;
};
130
131
/**
 * struct vmw_fifo_state - State of the device command FIFO.
 * @reserved_size: Bytes currently reserved by vmw_fifo_reserve().
 * @dynamic_buffer: Heap-allocated bounce buffer for large reservations.
 * @static_buffer: Pre-allocated bounce buffer of @static_buffer_size bytes.
 * @static_buffer_size: Size of @static_buffer.
 * @using_bounce_buffer: True if the current reservation is bounced rather
 *                       than written directly into the FIFO.
 * @capabilities: FIFO capability flags read from the device.
 * @fifo_mutex: Serializes FIFO reserve/commit.
 * @rwsem: Read-write semaphore guarding FIFO access vs. takedown.
 * @marker_queue: Lag-estimation markers for this FIFO.
 */
struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};
142
143
/**
 * struct vmw_relocation - A guest-pointer relocation recorded during
 * command-stream parsing, patched after buffer validation.
 * @location: Location in the command buffer to patch.
 * @index: Validate-list index of the buffer the pointer refers to.
 */
struct vmw_relocation {
	SVGAGuestPtr *location;
	uint32_t index;
};
147
148
/**
 * struct vmw_sw_context - Software state used while parsing and
 * validating a single execbuf command submission.
 *
 * Protected by the cmdbuf mutex (there is one context embedded in
 * struct vmw_private).
 */
struct vmw_sw_context{
	/* ida tracking buffer-object validate-list slots */
	struct ida bo_list;
	/* Last context id seen; valid only if cid_valid */
	uint32_t last_cid;
	bool cid_valid;
	/* True for in-kernel submissions (no user handle translation) */
	bool kernel;
	/* Context resource the current commands are relative to */
	struct vmw_resource *cur_ctx;
	/* Last surface id seen and its device translation; see sid_valid */
	uint32_t last_sid;
	uint32_t sid_translation;
	bool sid_valid;
	struct ttm_object_file *tfile;
	/* Buffers to validate for this submission */
	struct list_head validate_nodes;
	/* Guest-pointer relocations recorded while parsing */
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	/* Bounce buffer for user command streams; grown as needed */
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	/* Resources referenced by this submission */
	struct list_head resource_list;
	uint32_t fence_flags;
	/* Occlusion-query tracking for this submission */
	struct list_head query_list;
	struct ttm_buffer_object *cur_query_bo;
	uint32_t cur_query_cid;
	bool query_cid_valid;
};
172
173
struct
vmw_legacy_display
;
174
struct
vmw_overlay
;
175
176
/**
 * struct vmw_master - Per-DRM-master driver private data.
 * @lock: TTM lock used to stall clients of a dropped master.
 * @fb_surf_mutex: Protects @fb_surf.
 * @fb_surf: List of surface-backed framebuffers owned by this master.
 */
struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;
	struct list_head fb_surf;
};
181
182
/**
 * struct vmw_vga_topology_state - Saved per-display VGA topology,
 * captured so the console layout can be restored after KMS use.
 * @width: Display width in pixels.
 * @height: Display height in pixels.
 * @primary: Whether this display is the primary one.
 * @pos_x: Horizontal position of the display in the topology.
 * @pos_y: Vertical position of the display in the topology.
 */
struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};
189
190
/**
 * struct vmw_private - Per-device private data for the vmwgfx driver.
 *
 * One instance exists per bound SVGA device; it owns the TTM device,
 * the command FIFO state, KMS/overlay state, resource idrs, fencing
 * state and the embedded execbuf software context.
 */
struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	/* Base of the device's index/value I/O-port pair */
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	/* Device capability flags and GMR limits read at probe time */
	uint32_t capabilities;
	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t memory_size;
	bool has_gmr;
	/* Serializes register access through vmw_read()/vmw_write() */
	struct mutex hw_mutex;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr context_idr;
	struct idr surface_idr;
	struct idr stream_idr;

	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	int fence_queue_waiters; /* Protected by hw_mutex */
	int goal_queue_waiters; /* Protected by hw_mutex */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/* Execbuf parsing context; protected by cmdbuf_mutex */
	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;

	/* Operating-mode flags */
	bool stealth;
	bool is_opened;
	bool enable_fb;

	/* Master management */
	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;

	/* Protects num_3d_resources and 3D-enable/disable transitions */
	struct mutex release_mutex;
	uint32_t num_3d_resources;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct ttm_buffer_object *dummy_query_bo;
	struct ttm_buffer_object *pinned_bo;
	uint32_t query_cid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head surface_lru;
	uint32_t used_memory_size;
};
335
336
static
inline
struct
vmw_private
*vmw_priv(
struct
drm_device
*
dev
)
337
{
338
return
(
struct
vmw_private
*)dev->dev_private;
339
}
340
341
static
inline
struct
vmw_fpriv
*
vmw_fpriv
(
struct
drm_file *file_priv)
342
{
343
return
(
struct
vmw_fpriv
*)file_priv->driver_priv;
344
}
345
346
static
inline
struct
vmw_master
*
vmw_master
(
struct
drm_master *master)
347
{
348
return
(
struct
vmw_master
*) master->driver_priv;
349
}
350
351
/**
 * vmw_write - Write an SVGA device register via the index/value ports.
 * @dev_priv: The device private.
 * @offset: Register index to select.
 * @value: Value to write to the selected register.
 *
 * NOTE(review): callers appear to serialize register access with
 * dev_priv->hw_mutex (see the "Protected by hw_mutex" members) —
 * confirm the locking contract at call sites.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long base = dev_priv->io_start;

	/* Select the register, then write its value. */
	outl(offset, base + VMWGFX_INDEX_PORT);
	outl(value, base + VMWGFX_VALUE_PORT);
}
357
358
static
inline
uint32_t
vmw_read(
struct
vmw_private
*
dev_priv
,
359
unsigned
int
offset
)
360
{
361
uint32_t
val
;
362
363
outl
(offset, dev_priv->
io_start
+
VMWGFX_INDEX_PORT
);
364
val =
inl
(dev_priv->
io_start
+
VMWGFX_VALUE_PORT
);
365
return
val
;
366
}
367
368
int
vmw_3d_resource_inc
(
struct
vmw_private
*
dev_priv
,
bool
unhide_svga);
369
void
vmw_3d_resource_dec
(
struct
vmw_private
*
dev_priv
,
bool
hide_svga);
370
375
extern
int
vmw_gmr_bind
(
struct
vmw_private
*
dev_priv
,
376
struct
page
*
pages
[],
377
unsigned
long
num_pages
,
378
int
gmr_id);
379
extern
void
vmw_gmr_unbind
(
struct
vmw_private
*
dev_priv
,
int
gmr_id);
380
385
extern
struct
vmw_resource
*
vmw_context_alloc
(
struct
vmw_private
*
dev_priv
);
386
extern
void
vmw_resource_unreference
(
struct
vmw_resource
**p_res);
387
extern
struct
vmw_resource
*
vmw_resource_reference
(
struct
vmw_resource
*
res
);
388
extern
int
vmw_context_destroy_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
389
struct
drm_file *file_priv);
390
extern
int
vmw_context_define_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
391
struct
drm_file *file_priv);
392
extern
int
vmw_context_check
(
struct
vmw_private
*
dev_priv
,
393
struct
ttm_object_file
*tfile,
394
int
id
,
395
struct
vmw_resource
**p_res);
396
extern
int
vmw_user_lookup_handle
(
struct
vmw_private
*
dev_priv
,
397
struct
ttm_object_file
*tfile,
398
uint32_t
handle
,
399
struct
vmw_surface
**out_surf,
400
struct
vmw_dma_buffer
**out_buf);
401
extern
void
vmw_surface_res_free
(
struct
vmw_resource
*
res
);
402
extern
int
vmw_surface_init
(
struct
vmw_private
*
dev_priv
,
403
struct
vmw_surface
*srf,
404
void
(*
res_free
) (
struct
vmw_resource
*
res
));
405
extern
int
vmw_user_surface_lookup_handle
(
struct
vmw_private
*dev_priv,
406
struct
ttm_object_file
*tfile,
407
uint32_t
handle
,
408
struct
vmw_surface
**
out
);
409
extern
int
vmw_surface_destroy_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
410
struct
drm_file *file_priv);
411
extern
int
vmw_surface_define_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
412
struct
drm_file *file_priv);
413
extern
int
vmw_surface_reference_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
414
struct
drm_file *file_priv);
415
extern
int
vmw_surface_check
(
struct
vmw_private
*dev_priv,
416
struct
ttm_object_file
*tfile,
417
uint32_t
handle
,
int
*
id
);
418
extern
int
vmw_surface_validate
(
struct
vmw_private
*dev_priv,
419
struct
vmw_surface
*srf);
420
extern
void
vmw_dmabuf_bo_free
(
struct
ttm_buffer_object
*bo);
421
extern
int
vmw_dmabuf_init
(
struct
vmw_private
*dev_priv,
422
struct
vmw_dma_buffer
*vmw_bo,
423
size_t
size
,
struct
ttm_placement
*placement,
424
bool
interuptable,
425
void
(*bo_free) (
struct
ttm_buffer_object
*bo));
426
extern
int
vmw_dmabuf_alloc_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
427
struct
drm_file *file_priv);
428
extern
int
vmw_dmabuf_unref_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
429
struct
drm_file *file_priv);
430
extern
uint32_t
vmw_dmabuf_validate_node
(
struct
ttm_buffer_object
*bo,
431
uint32_t
cur_validate_node);
432
extern
void
vmw_dmabuf_validate_clear
(
struct
ttm_buffer_object
*bo);
433
extern
int
vmw_user_dmabuf_lookup
(
struct
ttm_object_file
*tfile,
434
uint32_t
id
,
struct
vmw_dma_buffer
**
out
);
435
extern
int
vmw_stream_claim_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
436
struct
drm_file *file_priv);
437
extern
int
vmw_stream_unref_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
438
struct
drm_file *file_priv);
439
extern
int
vmw_user_stream_lookup
(
struct
vmw_private
*dev_priv,
440
struct
ttm_object_file
*tfile,
441
uint32_t
*inout_id,
442
struct
vmw_resource
**
out
);
443
extern
void
vmw_resource_unreserve
(
struct
list_head
*
list
);
444
448
extern
int
vmw_dmabuf_to_placement
(
struct
vmw_private
*vmw_priv,
449
struct
vmw_dma_buffer
*bo,
450
struct
ttm_placement
*placement,
451
bool
interruptible);
452
extern
int
vmw_dmabuf_to_vram
(
struct
vmw_private
*dev_priv,
453
struct
vmw_dma_buffer
*
buf
,
454
bool
pin
,
bool
interruptible);
455
extern
int
vmw_dmabuf_to_vram_or_gmr
(
struct
vmw_private
*dev_priv,
456
struct
vmw_dma_buffer
*
buf
,
457
bool
pin
,
bool
interruptible);
458
extern
int
vmw_dmabuf_to_start_of_vram
(
struct
vmw_private
*vmw_priv,
459
struct
vmw_dma_buffer
*bo,
460
bool
pin
,
bool
interruptible);
461
extern
int
vmw_dmabuf_unpin
(
struct
vmw_private
*vmw_priv,
462
struct
vmw_dma_buffer
*bo,
463
bool
interruptible);
464
extern
void
vmw_bo_get_guest_ptr
(
const
struct
ttm_buffer_object
*
buf
,
465
SVGAGuestPtr
*
ptr
);
466
extern
void
vmw_bo_pin
(
struct
ttm_buffer_object
*bo,
bool
pin
);
467
472
extern
int
vmw_getparam_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
473
struct
drm_file *file_priv);
474
extern
int
vmw_get_cap_3d_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
475
struct
drm_file *file_priv);
476
extern
int
vmw_present_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
477
struct
drm_file *file_priv);
478
extern
int
vmw_present_readback_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
479
struct
drm_file *file_priv);
480
extern
unsigned
int
vmw_fops_poll
(
struct
file
*filp,
481
struct
poll_table_struct
*
wait
);
482
extern
ssize_t
vmw_fops_read
(
struct
file
*filp,
char
__user *
buffer
,
483
size_t
count
, loff_t *
offset
);
484
489
extern
int
vmw_fifo_init
(
struct
vmw_private
*dev_priv,
490
struct
vmw_fifo_state
*
fifo
);
491
extern
void
vmw_fifo_release
(
struct
vmw_private
*dev_priv,
492
struct
vmw_fifo_state
*
fifo
);
493
extern
void
*
vmw_fifo_reserve
(
struct
vmw_private
*dev_priv,
uint32_t
bytes
);
494
extern
void
vmw_fifo_commit
(
struct
vmw_private
*dev_priv,
uint32_t
bytes
);
495
extern
int
vmw_fifo_send_fence
(
struct
vmw_private
*dev_priv,
496
uint32_t
*
seqno
);
497
extern
void
vmw_fifo_ping_host
(
struct
vmw_private
*dev_priv,
uint32_t
reason
);
498
extern
bool
vmw_fifo_have_3d
(
struct
vmw_private
*dev_priv);
499
extern
bool
vmw_fifo_have_pitchlock
(
struct
vmw_private
*dev_priv);
500
extern
int
vmw_fifo_emit_dummy_query
(
struct
vmw_private
*dev_priv,
501
uint32_t
cid
);
502
507
extern
int
vmw_ttm_global_init
(
struct
vmw_private
*dev_priv);
508
extern
void
vmw_ttm_global_release
(
struct
vmw_private
*dev_priv);
509
extern
int
vmw_mmap
(
struct
file
*filp,
struct
vm_area_struct
*vma);
510
515
extern
struct
ttm_placement
vmw_vram_placement
;
516
extern
struct
ttm_placement
vmw_vram_ne_placement
;
517
extern
struct
ttm_placement
vmw_vram_sys_placement
;
518
extern
struct
ttm_placement
vmw_vram_gmr_placement
;
519
extern
struct
ttm_placement
vmw_vram_gmr_ne_placement
;
520
extern
struct
ttm_placement
vmw_sys_placement
;
521
extern
struct
ttm_placement
vmw_evictable_placement
;
522
extern
struct
ttm_placement
vmw_srf_placement
;
523
extern
struct
ttm_bo_driver
vmw_bo_driver
;
524
extern
int
vmw_dma_quiescent
(
struct
drm_device
*
dev
);
525
530
extern
int
vmw_execbuf_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
531
struct
drm_file *file_priv);
532
extern
int
vmw_execbuf_process
(
struct
drm_file *file_priv,
533
struct
vmw_private
*dev_priv,
534
void
__user *user_commands,
535
void
*kernel_commands,
536
uint32_t
command_size,
537
uint64_t
throttle_us,
538
struct
drm_vmw_fence_rep
__user
539
*user_fence_rep,
540
struct
vmw_fence_obj
**out_fence);
541
542
extern
void
543
vmw_execbuf_release_pinned_bo
(
struct
vmw_private
*dev_priv,
544
bool
only_on_cid_match,
uint32_t
cid
);
545
546
extern
int
vmw_execbuf_fence_commands
(
struct
drm_file *file_priv,
547
struct
vmw_private
*dev_priv,
548
struct
vmw_fence_obj
**p_fence,
549
uint32_t
*p_handle);
550
extern
void
vmw_execbuf_copy_fence_user
(
struct
vmw_private
*dev_priv,
551
struct
vmw_fpriv
*vmw_fp,
552
int
ret
,
553
struct
drm_vmw_fence_rep
__user
554
*user_fence_rep,
555
struct
vmw_fence_obj
*fence,
556
uint32_t
fence_handle);
557
562
extern
irqreturn_t
vmw_irq_handler
(
DRM_IRQ_ARGS
);
563
extern
int
vmw_wait_seqno
(
struct
vmw_private
*dev_priv,
bool
lazy,
564
uint32_t
seqno
,
bool
interruptible,
565
unsigned
long
timeout);
566
extern
void
vmw_irq_preinstall
(
struct
drm_device
*
dev
);
567
extern
int
vmw_irq_postinstall
(
struct
drm_device
*
dev
);
568
extern
void
vmw_irq_uninstall
(
struct
drm_device
*
dev
);
569
extern
bool
vmw_seqno_passed
(
struct
vmw_private
*dev_priv,
570
uint32_t
seqno
);
571
extern
int
vmw_fallback_wait
(
struct
vmw_private
*dev_priv,
572
bool
lazy,
573
bool
fifo_idle,
574
uint32_t
seqno
,
575
bool
interruptible,
576
unsigned
long
timeout);
577
extern
void
vmw_update_seqno
(
struct
vmw_private
*dev_priv,
578
struct
vmw_fifo_state
*fifo_state);
579
extern
void
vmw_seqno_waiter_add
(
struct
vmw_private
*dev_priv);
580
extern
void
vmw_seqno_waiter_remove
(
struct
vmw_private
*dev_priv);
581
extern
void
vmw_goal_waiter_add
(
struct
vmw_private
*dev_priv);
582
extern
void
vmw_goal_waiter_remove
(
struct
vmw_private
*dev_priv);
583
589
extern
void
vmw_marker_queue_init
(
struct
vmw_marker_queue
*
queue
);
590
extern
void
vmw_marker_queue_takedown
(
struct
vmw_marker_queue
*
queue
);
591
extern
int
vmw_marker_push
(
struct
vmw_marker_queue
*
queue
,
592
uint32_t
seqno
);
593
extern
int
vmw_marker_pull
(
struct
vmw_marker_queue
*
queue
,
594
uint32_t
signaled_seqno);
595
extern
int
vmw_wait_lag
(
struct
vmw_private
*dev_priv,
596
struct
vmw_marker_queue
*
queue
,
uint32_t
us);
597
602
int
vmw_fb_init
(
struct
vmw_private
*vmw_priv);
603
int
vmw_fb_close
(
struct
vmw_private
*dev_priv);
604
int
vmw_fb_off
(
struct
vmw_private
*vmw_priv);
605
int
vmw_fb_on
(
struct
vmw_private
*vmw_priv);
606
611
int
vmw_kms_init
(
struct
vmw_private
*dev_priv);
612
int
vmw_kms_close
(
struct
vmw_private
*dev_priv);
613
int
vmw_kms_save_vga
(
struct
vmw_private
*vmw_priv);
614
int
vmw_kms_restore_vga
(
struct
vmw_private
*vmw_priv);
615
int
vmw_kms_cursor_bypass_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
616
struct
drm_file *file_priv);
617
void
vmw_kms_cursor_post_execbuf
(
struct
vmw_private
*dev_priv);
618
void
vmw_kms_cursor_snoop
(
struct
vmw_surface
*srf,
619
struct
ttm_object_file
*tfile,
620
struct
ttm_buffer_object
*bo,
621
SVGA3dCmdHeader
*
header
);
622
int
vmw_kms_write_svga
(
struct
vmw_private
*vmw_priv,
623
unsigned
width
,
unsigned
height
,
unsigned
pitch,
624
unsigned
bpp
,
unsigned
depth
);
625
void
vmw_kms_idle_workqueues
(
struct
vmw_master
*vmaster);
626
bool
vmw_kms_validate_mode_vram
(
struct
vmw_private
*dev_priv,
627
uint32_t
pitch,
628
uint32_t
height
);
629
u32
vmw_get_vblank_counter
(
struct
drm_device
*
dev
,
int
crtc
);
630
int
vmw_enable_vblank
(
struct
drm_device
*
dev
,
int
crtc
);
631
void
vmw_disable_vblank
(
struct
drm_device
*
dev
,
int
crtc
);
632
int
vmw_kms_present
(
struct
vmw_private
*dev_priv,
633
struct
drm_file *file_priv,
634
struct
vmw_framebuffer
*vfb,
635
struct
vmw_surface
*surface,
636
uint32_t
sid
,
int32_t
destX,
int32_t
destY,
637
struct
drm_vmw_rect
*clips,
638
uint32_t
num_clips);
639
int
vmw_kms_readback
(
struct
vmw_private
*dev_priv,
640
struct
drm_file *file_priv,
641
struct
vmw_framebuffer
*vfb,
642
struct
drm_vmw_fence_rep
__user *user_fence_rep,
643
struct
drm_vmw_rect
*clips,
644
uint32_t
num_clips);
645
int
vmw_kms_update_layout_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
646
struct
drm_file *file_priv);
647
648
int
vmw_dumb_create
(
struct
drm_file *file_priv,
649
struct
drm_device
*
dev
,
650
struct
drm_mode_create_dumb
*args);
651
652
int
vmw_dumb_map_offset
(
struct
drm_file *file_priv,
653
struct
drm_device
*
dev
,
uint32_t
handle
,
654
uint64_t
*
offset
);
655
int
vmw_dumb_destroy
(
struct
drm_file *file_priv,
656
struct
drm_device
*
dev
,
657
uint32_t
handle
);
662
int
vmw_overlay_init
(
struct
vmw_private
*dev_priv);
663
int
vmw_overlay_close
(
struct
vmw_private
*dev_priv);
664
int
vmw_overlay_ioctl
(
struct
drm_device
*
dev
,
void
*
data
,
665
struct
drm_file *file_priv);
666
int
vmw_overlay_stop_all
(
struct
vmw_private
*dev_priv);
667
int
vmw_overlay_resume_all
(
struct
vmw_private
*dev_priv);
668
int
vmw_overlay_pause_all
(
struct
vmw_private
*dev_priv);
669
int
vmw_overlay_claim
(
struct
vmw_private
*dev_priv,
uint32_t
*
out
);
670
int
vmw_overlay_unref
(
struct
vmw_private
*dev_priv,
uint32_t
stream_id);
671
int
vmw_overlay_num_overlays
(
struct
vmw_private
*dev_priv);
672
int
vmw_overlay_num_free_overlays
(
struct
vmw_private
*dev_priv);
673
678
extern
const
struct
ttm_mem_type_manager_func
vmw_gmrid_manager_func
;
679
684
static
inline
void
vmw_surface_unreference(
struct
vmw_surface
**srf)
685
{
686
struct
vmw_surface
*tmp_srf = *srf;
687
struct
vmw_resource
*
res
= &tmp_srf->
res
;
688
*srf =
NULL
;
689
690
vmw_resource_unreference
(&res);
691
}
692
693
static
inline
struct
vmw_surface
*vmw_surface_reference(
struct
vmw_surface
*srf)
694
{
695
(
void
)
vmw_resource_reference
(&srf->
res
);
696
return
srf;
697
}
698
699
static
inline
void
vmw_dmabuf_unreference(
struct
vmw_dma_buffer
**
buf
)
700
{
701
struct
vmw_dma_buffer
*tmp_buf = *
buf
;
702
struct
ttm_buffer_object
*bo = &tmp_buf->
base
;
703
*buf =
NULL
;
704
705
ttm_bo_unref
(&bo);
706
}
707
708
static
inline
struct
vmw_dma_buffer
*vmw_dmabuf_reference(
struct
vmw_dma_buffer
*
buf
)
709
{
710
if
(ttm_bo_reference(&buf->
base
))
711
return
buf
;
712
return
NULL
;
713
}
714
715
static
inline
struct
ttm_mem_global
*vmw_mem_glob(
struct
vmw_private
*dev_priv)
716
{
717
return
(
struct
ttm_mem_global
*) dev_priv->
mem_global_ref
.object;
718
}
719
#endif
Generated on Thu Jan 10 2013 13:33:51 for Linux Kernel by
1.8.2