				    struct drm_file *file_priv,

	if (radeon_check_offset(dev_priv, off))

	radeon_priv = file_priv->driver_priv;

	if (radeon_check_offset(dev_priv, off)) {
		DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
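/* radeon_check_and_fixup_packets(): for each register-packet ID, find the
 * dword(s) carrying a GPU address and run them through the offset
 * check/fixup above before the packet is accepted */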
				     struct drm_file *file_priv,

		data = drm_buffer_pointer_to_dword(buf,

		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
			DRM_ERROR("Invalid depth buffer offset\n");

		data = drm_buffer_pointer_to_dword(buf,

		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
			DRM_ERROR("Invalid colour buffer offset\n");

		data = drm_buffer_pointer_to_dword(buf, 0);
		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
			DRM_ERROR("Invalid R200 texture offset\n");

		data = drm_buffer_pointer_to_dword(buf,

		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
			DRM_ERROR("Invalid R100 texture offset\n");
			for (i = 0; i < 5; i++) {
				data = drm_buffer_pointer_to_dword(buf, i);
				if (radeon_check_and_fixup_offset(dev_priv,
								  file_priv, data)) {
					DRM_ERROR("Invalid R200 cubic texture offset\n");

			for (i = 0; i < 5; i++) {
				data = drm_buffer_pointer_to_dword(buf, i);
				if (radeon_check_and_fixup_offset(dev_priv,
								  file_priv, data)) {
					DRM_ERROR("Invalid R100 cubic texture offset\n");

		DRM_ERROR("Unknown state packet ID %d\n", id);
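/* radeon_check_and_fixup_packet3(): read the type-3 packet header, derive
 * the packet size in dwords, then vet any embedded offsets opcode by
 * opcode */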
					       struct drm_file *file_priv,

	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);

		DRM_ERROR("Not a type 3 packet\n");

	if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
		DRM_ERROR("Packet size larger than size of data provided\n");
	switch (*cmd & 0xff00) {

		DRM_ERROR("Invalid 3d packet for r100-class chip\n");

			DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
				  count);

		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);

		narrays = *cmd & ~0xc000;
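		/* array descriptors come in pairs: a packed attribute dword
		 * followed by one or two offsets, so the loop below fixes up
		 * to two offsets per pass */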
		while ((k < narrays) && (i < (count + 2))) {

			cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
			if (radeon_check_and_fixup_offset(dev_priv, file_priv,
							  cmd)) {
				DRM_ERROR("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
					  k, i);

			cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);

			if (radeon_check_and_fixup_offset(dev_priv,
							  file_priv, cmd)) {
				DRM_ERROR("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
					  k, i);

		if ((k != narrays) || (i != (count + 2))) {
			DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
				  k, i, narrays, count + 1);
		DRM_ERROR("Invalid 3d packet for r200-class chip\n");

		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
		if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
			DRM_ERROR("Invalid rndr_gen_indx offset\n");

		DRM_ERROR("Invalid 3d packet for r100-class chip\n");

		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
		if ((*cmd & 0x8000ffff) != 0x80000810) {
			DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);

		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
		if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
			DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);

		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);

			u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
			offset = *cmd2 << 10;
			if (radeon_check_and_fixup_offset(dev_priv, file_priv, &offset)) {
				DRM_ERROR("Invalid first packet offset\n");

			*cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;

			u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
			offset = *cmd3 << 10;
			if (radeon_check_and_fixup_offset(dev_priv, file_priv, &offset)) {
				DRM_ERROR("Invalid second packet offset\n");

			*cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;

		DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
	DRM_DEBUG(" box: x1=%d y1=%d x2=%d y2=%d\n",
		  box->x1, box->y1, box->x2, box->y2);
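/* radeon_emit_state(): validate a block of context state before emitting;
 * registers that carry buffer addresses (depth, color, per-unit textures)
 * go through radeon_check_and_fixup_offset() first */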
			      struct drm_file *file_priv,

	DRM_DEBUG("dirty=0x%08x\n", dirty);

		if (radeon_check_and_fixup_offset(dev_priv, file_priv,

			DRM_ERROR("Invalid depth buffer offset\n");

		if (radeon_check_and_fixup_offset(dev_priv, file_priv,

			DRM_ERROR("Invalid depth buffer offset\n");

		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
						  &tex[0].pp_txoffset)) {
			DRM_ERROR("Invalid texture offset for unit 0\n");

		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
						  &tex[1].pp_txoffset)) {
			DRM_ERROR("Invalid texture offset for unit 1\n");

		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
						  &tex[2].pp_txoffset)) {
			DRM_ERROR("Invalid texture offset for unit 2\n");
			       struct drm_file *file_priv,

	return radeon_emit_state(dev_priv, file_priv, &state->context,

	 "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},

	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},

	 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
			     int x, int y, int w, int h, int r, int g, int b)

		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
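		/* RGB565: keep the top 5/6/5 bits of each channel, e.g.
		 * r = g = b = 255 packs to 0xf800 | 0x07e0 | 0x001f = 0xffff */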
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);

	if (dev_priv->stats.last_frame_reads > 1 ||
	    dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {

	if (dev_priv->stats.freelist_loops) {
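	/* purple box: a page flip happened this frame */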
		radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255);
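	/* red box: the driver had to wait for engine idle */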
		radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0);
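	/* yellow box: a texture upload occurred */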
		radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0);
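	/* green box: the hardware never went idle, as far as we can tell */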
		radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0);
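	/* bar graph: buffers requested this frame, clamped to 100 below */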
	if (dev_priv->stats.requested_bufs) {
		if (dev_priv->stats.requested_bufs > 100)
			dev_priv->stats.requested_bufs = 100;

		radeon_clear_box(dev_priv, master_priv, 4, 16,
				 dev_priv->stats.requested_bufs, 4,
static void radeon_cp_dispatch_clear(struct drm_device *dev,
				     struct drm_master *master,

	int nbox = sarea_priv->nbox;

	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;

	DRM_DEBUG("flags = 0x%x\n", flags);

	dev_priv->stats.clears++;

	printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
		for (i = 0; i < nbox; i++) {

			int w = pbox[i].x2 - x;
			int h = pbox[i].y2 - y;

			DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",

			if (flags & RADEON_FRONT) {

		int depthpixperline =

			clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;

				     tempRB3D_DEPTHCLEARVALUE);
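		/* convert each cliprect into hierz tile coordinates; the
		 * shifts below suggest 8-line tile rows and a 64-pixel tile
		 * pitch on this path, clearing one row of tiles per pass */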
		for (i = 0; i < nbox; i++) {
			int tileoffset, nrtilesx, nrtilesy, j;

				    ((pbox[i].y1 >> 3) * depthpixperline +

				    ((pbox[i].x2 & ~63) -
				     (pbox[i].x1 & ~63)) >> 4;

				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {

					tileoffset += depthpixperline >> 6;

				    ((pbox[i].y1 >> 3) * depthpixperline +

				    (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);

				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {

					tileoffset += depthpixperline >> 5;

				    ((pbox[i].y1 >> 4) * depthpixperline +

				    ((pbox[i].x2 & ~63) -
				     (pbox[i].x1 & ~63)) >> 4;

				    (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
				for (j = 0; j <= nrtilesy; j++) {

					tileoffset += depthpixperline >> 6;

		OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
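	/* depth/stencil-only clears render a quad into just those buffers,
	 * so the 3D engine state has to be set up explicitly via the temp*
	 * register values below */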
		int tempRB3D_ZSTENCILCNTL;
		int tempRB3D_STENCILREFMASK;
		int tempRB3D_PLANEMASK;

		int tempSE_VTE_CNTL;
		int tempSE_VTX_FMT_0;
		int tempSE_VTX_FMT_1;
		int tempSE_VAP_CNTL;
		int tempRE_AUX_SCISSOR_CNTL;

		tempRB3D_STENCILREFMASK = 0x0;

		tempSE_CNTL = depth_clear->se_cntl;

		tempRB3D_PLANEMASK = 0x0;

		tempRE_AUX_SCISSOR_CNTL = 0x0;

		tempSE_VTX_FMT_1 = 0x0;

			tempRB3D_STENCILREFMASK = 0x00000000;

		if (flags & RADEON_USE_HIERZ) {

			     tempRB3D_STENCILREFMASK);

		for (i = 0; i < nbox; i++) {

			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

	} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {

		if (flags & RADEON_DEPTH) {

		if (flags & RADEON_STENCIL) {

			rb3d_stencilrefmask = 0x00000000;

		if (flags & RADEON_USE_COMP_ZBUF) {

		if (flags & RADEON_USE_HIERZ) {

		for (i = 0; i < nbox; i++) {

			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master)

	int nbox = sarea_priv->nbox;

		radeon_cp_performance_boxes(dev_priv, master_priv);

	for (i = 0; i < nbox; i++) {

		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);

		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |

	DRM_DEBUG("pfCurrentPage=%d\n",

		radeon_cp_performance_boxes(dev_priv, master_priv);
static int bad_prim_vertex_nr(int primitive, int nr)
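	/* reject degenerate vertex counts: line lists need an even, nonzero
	 * count; list primitives built from triples need a nonzero multiple
	 * of three */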
		return (nr & 1) || nr == 0;

		return nr % 3 || nr == 0;
static void radeon_cp_dispatch_vertex(struct drm_device *dev,
				      struct drm_file *file_priv,
				      struct drm_buf *buf,

	int nbox = sarea_priv->nbox;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",

		DRM_ERROR("bad prim %x numverts %d\n",

			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
static void radeon_cp_dispatch_indirect(struct drm_device *dev,
					struct drm_buf *buf, int start, int end)

	DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

			      + buf->offset + start);
		int dwords = (end - start + 3) / sizeof(u32);
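		/* the CP consumes whole dwords, so the byte range is rounded
		 * up here; an odd dword count is padded out with a type-2
		 * no-op packet before dispatch */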
			    ((char *)dev->agp_buffer_map->handle
			     + buf->offset + start);
static void radeon_cp_dispatch_indices(struct drm_device *dev,
				       struct drm_master *master,
				       struct drm_buf *elt_buf,

	int nbox = sarea_priv->nbox;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",

	if (bad_prim_vertex_nr(prim->prim, count)) {
		DRM_ERROR("bad prim %x count %d\n", prim->prim, count);

	if (start >= prim->finish || (prim->start & 0x7)) {
		DRM_ERROR("buffer prim %d\n", prim->prim);

	data = (u32 *) ((char *)dev->agp_buffer_map->handle +
			elt_buf->offset + prim->start);

	data[4] = (prim->prim |

		radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

		radeon_cp_dispatch_indirect(dev, elt_buf,
#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE

static int radeon_cp_dispatch_texture(struct drm_device *dev,
				      struct drm_file *file_priv,

	struct drm_buf *buf;

	int size, dwords, tex_width, blit_width, spitch;

	u32 texpitch, microtile;

	if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
		DRM_ERROR("Invalid destination offset\n");

		tex_width = tex->width * 4;
		blit_width = image->width * 4;

		tex_width = tex->width * 2;
		blit_width = image->width * 2;

		tex_width = tex->width * 1;
		blit_width = image->width * 1;

		DRM_ERROR("invalid texture format %d\n", tex->format);

	spitch = blit_width >> 6;
	if (spitch == 0 && image->height > 1)

	texpitch = tex->pitch;
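	/* spitch is the blit source pitch in 64-byte units, so a width
	 * under 64 bytes only works for single-row uploads; narrow tiled
	 * destinations get special-cased below */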
		if (tex_width < 64) {

	if (!radeon_check_offset(dev_priv, tex->offset + image->height *

		DRM_ERROR("Invalid final destination offset\n");

	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);

		DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",

			size = height * blit_width;

			size = height * blit_width;
		} else if (size < 4 && size > 0) {

		} else if (size == 0) {

			DRM_DEBUG("EAGAIN\n");

		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);

#define RADEON_COPY_MT(_buf, _data, _width) \

	if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
		DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
				if (tex_width >= 64 || tex_width <= 16) {

						       (int)(tex_width * sizeof(u32)));
				} else if (tex_width == 32) {

			} else if (tex_width >= 64 || tex_width == 16) {

					       (int)(dwords * sizeof(u32)));
			} else if (tex_width < 16) {
				for (i = 0; i < tex->height; i++) {

			} else if (tex_width == 32) {

				for (i = 0; i < tex->height; i += 2) {

			if (tex_width >= 32) {

					       (int)(dwords * sizeof(u32)));

				for (i = 0; i < tex->height; i++) {

#undef RADEON_COPY_MT
		byte_offset = (image->y & ~2047) * blit_width;
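		/* the & ~2047 / % 2048 pair below suggests the blit's Y
		 * field is limited to 2048 lines: byte_offset rebases the
		 * destination window and y % 2048 addresses within it */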
		buf->file_priv = file_priv;

		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |

		OUT_RING((spitch << 22) | (offset >> 10));
		OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));

		OUT_RING((image->x << 16) | (image->y % 2048));

	} while (image->height > 0);
static void radeon_cp_dispatch_stipple(struct drm_device *dev, u32 *stipple)

	for (i = 0; i < 32; i++) {

static void radeon_apply_surface_regs(int surf_index,

	if (!dev_priv->mmio)

		     dev_priv->surfaces[surf_index].flags);

		     dev_priv->surfaces[surf_index].lower);

		     dev_priv->surfaces[surf_index].upper);
				 struct drm_file *file_priv)

	int virt_surface_index;

	new_lower = new->address;
	new_upper = new_lower + new->size - 1;

	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||

	     RADEON_SURF_ADDRESS_FIXED_MASK)
	    || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))

		if ((dev_priv->surfaces[i].refcount != 0) &&
		    (((new_lower >= dev_priv->surfaces[i].lower) &&
		      (new_lower < dev_priv->surfaces[i].upper)) ||
		     ((new_lower < dev_priv->surfaces[i].lower) &&
		      (new_upper > dev_priv->surfaces[i].lower)))) {

	if (i == 2 * RADEON_MAX_SURFACES) {

	virt_surface_index = i;
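	/* try to reuse an existing hardware surface: first, does the new
	 * range sit immediately below an allocated one (extend downward)? */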
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_upper + 1 == dev_priv->surfaces[i].lower)) {

			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;

			return virt_surface_index;
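		/* ...or immediately above one (extend upward)? */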
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_lower == dev_priv->surfaces[i].upper + 1)) {

			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;

			return virt_surface_index;
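	/* no adjacent match: claim an unused hardware surface slot */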
		if (dev_priv->surfaces[i].refcount == 0) {

			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;

			return virt_surface_index;
static int free_surface(struct drm_file *file_priv,

static void radeon_surfaces_release(struct drm_file *file_priv,

		free_surface(file_priv, dev_priv,

static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)

	if (alloc_surface(alloc, dev_priv, file_priv) == -1)

static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)

	if (free_surface(file_priv, dev_priv, memfree->address))
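/* the ioctl handlers below each take the hardware lock with
 * LOCK_TEST_WITH_RETURN, validate their arguments, and hand off to the
 * dispatch helpers above */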
static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)

	LOCK_TEST_WITH_RETURN(dev, file_priv);

			       sarea_priv->nbox * sizeof(depth_boxes[0])))

	radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes);

static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master)

static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	radeon_do_init_pageflip(dev, file_priv->master);

static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	radeon_cp_dispatch_swap(dev, file_priv->master);
static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)

	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);

		DRM_ERROR("buffer prim %d\n", vertex->prim);

	buf = dma->buflist[vertex->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",

		DRM_ERROR("sending pending buffer %d\n", vertex->idx);

	if (vertex->count) {
		buf->used = vertex->count;

		if (radeon_emit_state(dev_priv, file_priv,

				      sarea_priv->dirty)) {
			DRM_ERROR("radeon_emit_state failed\n");

	radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim);
static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)

	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",

	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts->idx, dma->buf_count - 1);

		DRM_ERROR("buffer prim %d\n", elts->prim);

	buf = dma->buflist[elts->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",

		DRM_ERROR("sending pending buffer %d\n", elts->idx);

	if (elts->start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);

	if (elts->start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);

	buf->used = elts->end;

	if (radeon_emit_state(dev_priv, file_priv,

			      sarea_priv->dirty)) {
		DRM_ERROR("radeon_emit_state failed\n");

	radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim);
static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)

	LOCK_TEST_WITH_RETURN(dev, file_priv);

		DRM_ERROR("null texture image!\n");

	ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);

static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	radeon_cp_dispatch_stipple(dev, mask);
static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)

	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",

	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect->idx, dma->buf_count - 1);

	buf = dma->buflist[indirect->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",

		DRM_ERROR("sending pending buffer %d\n", indirect->idx);

	if (indirect->start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect->start, buf->used);

	buf->used = indirect->end;

	radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)

	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;

	unsigned char laststate;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("pid=%d index=%d discard=%d\n",

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);

	buf = dma->buflist[vertex->idx];

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",

		DRM_ERROR("sending pending buffer %d\n", vertex->idx);

	for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {

		if (radeon_emit_state2(dev_priv, file_priv, &state)) {
			DRM_ERROR("radeon_emit_state2 failed\n");

			radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim);

			radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim);

	if (sarea_priv->nbox == 1)
		sarea_priv->nbox = 0;
			       struct drm_file *file_priv,

	if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
		DRM_ERROR("Packet size provided larger than data provided\n");

	if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,

		DRM_ERROR("Packet verification failed\n");

	int sz = header.scalars.count;
	int start = header.scalars.offset;
	int stride = header.scalars.stride;

	int sz = header.scalars.count;
	int start = ((unsigned int)header.scalars.offset) + 0x100;
	int stride = header.scalars.stride;
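	/* SCALARS2 apparently targets the upper scalar-register bank: the
	 * same payload as SCALARS, with the start offset biased by 0x100 */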
	int sz = header.vectors.count;
	int start = header.vectors.offset;
	int stride = header.vectors.stride;

	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
static int radeon_emit_packet3(struct drm_device *dev,
			       struct drm_file *file_priv,

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,

		DRM_ERROR("Packet verification failed\n");

static int radeon_emit_packet3_cliprect(struct drm_device *dev,
					struct drm_file *file_priv,

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,

		DRM_ERROR("Packet verification failed\n");

	if (i < cmdbuf->nbox) {

			radeon_emit_clip_rect(dev_priv, &box);

	} while (++i < cmdbuf->nbox);
	if (cmdbuf->nbox == 1)

	drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);

static int radeon_emit_wait(struct drm_device *dev, int flags)

	DRM_DEBUG("%x\n", flags);
static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)

	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf = NULL;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {

	if (cmdbuf->bufsz != 0) {

	orig_nbox = cmdbuf->nbox;

	while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {

					  sizeof(stack_header), &stack_header);
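		/* pull one command header at a time off the user buffer;
		 * each case below validates its payload before anything
		 * reaches the ring */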
		switch (header->header.cmd_type) {

			DRM_DEBUG("RADEON_CMD_PACKET\n");
			if (radeon_emit_packets
			    (dev_priv, file_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_packets failed\n");

			DRM_DEBUG("RADEON_CMD_SCALARS\n");
			if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars failed\n");

			DRM_DEBUG("RADEON_CMD_VECTORS\n");
			if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_vectors failed\n");

			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header->dma.buf_idx;
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);

			buf = dma->buflist[idx];
			if (buf->file_priv != file_priv || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->file_priv, file_priv,

			DRM_DEBUG("RADEON_CMD_PACKET3\n");
			if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
				DRM_ERROR("radeon_emit_packet3 failed\n");

			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
			if (radeon_emit_packet3_cliprect
			    (dev, file_priv, cmdbuf, orig_nbox)) {
				DRM_ERROR("radeon_emit_packet3_clip failed\n");

			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
			if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars2 failed\n");

			DRM_DEBUG("RADEON_CMD_WAIT\n");
			if (radeon_emit_wait(dev, header->wait.flags)) {
				DRM_ERROR("radeon_emit_wait failed\n");

			DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
			if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
				DRM_ERROR("radeon_emit_veclinear failed\n");

			DRM_ERROR("bad cmd_type %d at byte %d\n",

				  cmdbuf->buffer->iterator);

	DRM_DEBUG("DONE\n");
static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)

	switch (param->param) {

		dev_priv->stats.last_frame_reads++;

		dev_priv->stats.last_clear_reads++;

		value = drm_dev_to_irq(dev);

		value = dev_priv->mmio->offset;

#if BITS_PER_LONG == 32

		DRM_DEBUG("Invalid parameter %d\n", param->param);

		DRM_ERROR("copy_to_user\n");
static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)

	switch (sp->param) {

		radeon_priv = file_priv->driver_priv;

		if (sp->value == 0) {
			DRM_DEBUG("color tiling disabled\n");

		} else if (sp->value == 1) {
			DRM_DEBUG("color tiling enabled\n");

		DRM_DEBUG("Invalid parameter %d\n", sp->param);

	if (dev->dev_private) {

		radeon_surfaces_release(file_priv, dev_priv);

	file_priv->driver_priv = radeon_priv;

	    file_priv->driver_priv;
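/* ioctl table: CP setup/reset require DRM_MASTER and root; ordinary
 * rendering entry points only need an authenticated client (DRM_AUTH) */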
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),