31 #define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000
32 #define SAVAGE_EVENT_USEC_TIMEOUT 5000000
33 #define SAVAGE_FREELIST_DEBUG 0
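/*
 * Hedged note: these appear to be busy-wait budgets in microseconds used by
 * the wait helpers below -- a 1 s default and a longer 5 s limit for event
 * waits -- plus a compile-time switch for extra freelist sanity checking.
 */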
47 DRM_ERROR("Trying to emit %d words "
48 "(more than guaranteed space in COB)\n", n);
54 if ((status & mask) < threshold)
60 DRM_ERROR("failed!\n");
61 DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
81 DRM_ERROR("failed!\n");
82 DRM_INFO(" status=0x%08x\n", status);
102 DRM_ERROR("failed!\n");
103 DRM_INFO(" status=0x%08x\n", status);
128 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
129 (status & 0xffff) == 0)
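/*
 * A minimal sketch (illustration only; the helper name is made up): the
 * two-line test above, which reappears again further down, compares 16-bit
 * event counters modulo 2^16.  Event e counts as signalled once the hardware
 * counter has advanced to within half the 16-bit range of it, or once the
 * counter reads zero.
 */
static inline int savage_event_passed(unsigned int status, unsigned int e)
{
	unsigned int hw = status & 0xffff;	/* 16-bit hardware event counter */

	return ((hw - e) & 0xffff) <= 0x7fff || hw == 0;
}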
135 DRM_ERROR("failed!\n");
136 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
150 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
151 (status & 0xffff) == 0)
157 DRM_ERROR("failed!\n");
158 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
178 count = (count + 1) & 0xffff;
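/*
 * Hedged note: the emitted event number is kept to 16 bits here; the
 * event-wait ioctl below splits its 32-bit cookie into a low 16-bit event
 * number and a high 16-bit wrap count, which suggests wrap-arounds of this
 * counter are tracked separately in software.
 */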
209 struct drm_device_dma *dma = dev->dma;
213 DRM_DEBUG("count=%d\n", dma->buf_count);
223 for (i = 0; i < dma->buf_count; i++) {
224 buf = dma->buflist[i];
225 entry = buf->dev_private;
239 static struct drm_buf *savage_freelist_get(struct drm_device *dev)
257 DRM_DEBUG(" head=0x%04x %d\n", event, wrap);
259 if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
268 DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
280 DRM_ERROR("entry already on freelist.\n");
322 unsigned int wrap, i;
358 DRM_ERROR("wait_evnt failed!\n");
372 DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
375 if (cur + nr_pages < dev_priv->nr_dma_pages) {
395 for (i = cur; nr_pages > 0; ++i, --nr_pages) {
398 DRM_ERROR("unflushed page %u: used=%u\n",
410 DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
436 DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
437 "pad=%u, align=%u\n",
455 phys_addr = dev_priv->cmd_dma->offset +
461 DRM_DEBUG("phys_addr=%lx, len=%u\n",
462 phys_addr | dev_priv->dma_type, len);
475 for (i = first; i < cur; ++i) {
496 DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
510 DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
515 i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
521 if (i < dev_priv->current_dma_page &&
523 DRM_ERROR("partial DMA page %u: used=%u",
543 if (dev_priv == NULL)
546 dev->dev_private = (void *)dev_priv;
565 unsigned long mmio_base, fb_base, fb_size, aperture_base;
570 unsigned int fb_rsrc, aper_rsrc;
573 dev_priv->mtrr[0].handle = -1;
574 dev_priv->mtrr[1].handle = -1;
575 dev_priv->mtrr[2].handle = -1;
587 dev_priv->mtrr[0].base = fb_base;
588 dev_priv->mtrr[0].size = 0x01000000;
589 dev_priv->mtrr[0].handle =
590 drm_mtrr_add(dev_priv->mtrr[0].base,
591 dev_priv->mtrr[0].size, DRM_MTRR_WC);
592 dev_priv->mtrr[1].base = fb_base + 0x02000000;
593 dev_priv->mtrr[1].size = 0x02000000;
594 dev_priv->mtrr[1].handle =
595 drm_mtrr_add(dev_priv->mtrr[1].base,
596 dev_priv->mtrr[1].size, DRM_MTRR_WC);
597 dev_priv->mtrr[2].base = fb_base + 0x04000000;
598 dev_priv->mtrr[2].size = 0x04000000;
599 dev_priv->mtrr[2].handle =
600 drm_mtrr_add(dev_priv->mtrr[2].base,
601 dev_priv->mtrr[2].size, DRM_MTRR_WC);
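/*
 * Hedged reading of the block above: this path covers the frame-buffer
 * aperture with three write-combining MTRR ranges (16 MB at the base, 32 MB
 * at +32 MB, 64 MB at +64 MB), while the branch further down uses a single
 * 128 MB range; the handles are preset to -1 so cleanup can tell which
 * ranges were actually added.
 */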
603 DRM_ERROR("strange pci_resource_len %08llx\n",
619 dev_priv->mtrr[0].base = fb_base;
620 dev_priv->mtrr[0].size = 0x08000000;
621 dev_priv->mtrr[0].handle =
622 drm_mtrr_add(dev_priv->mtrr[0].base,
623 dev_priv->mtrr[0].size, DRM_MTRR_WC);
625 DRM_ERROR("strange pci_resource_len %08llx\n",
663 for (i = 0; i < 3; ++i)
664 if (dev_priv->mtrr[i].handle >= 0)
665 drm_mtrr_del(dev_priv->mtrr[i].handle,
666 dev_priv->mtrr[i].base,
667 dev_priv->mtrr[i].size, DRM_MTRR_WC);
684 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
688 DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp);
693 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
715 if (!dev_priv->sarea) {
716 DRM_ERROR("could not find sarea!\n");
717 savage_do_cleanup_bci(dev);
723 DRM_ERROR("could not find shadow status region!\n");
724 savage_do_cleanup_bci(dev);
732 dev->agp_buffer_map = drm_core_findmap(dev,
734 if (!dev->agp_buffer_map) {
735 DRM_ERROR("could not find DMA buffer region!\n");
736 savage_do_cleanup_bci(dev);
740 if (!dev->agp_buffer_map->handle) {
741 DRM_ERROR("failed to ioremap DMA buffer region!\n");
742 savage_do_cleanup_bci(dev);
750 DRM_ERROR("could not find agp texture region!\n");
751 savage_do_cleanup_bci(dev);
760 DRM_ERROR("command DMA not supported on "
761 "Savage3D/MX/IX.\n");
762 savage_do_cleanup_bci(dev);
765 if (dev->dma && dev->dma->buflist) {
766 DRM_ERROR("command and vertex DMA not supported "
767 "at the same time.\n");
768 savage_do_cleanup_bci(dev);
773 DRM_ERROR("could not find command DMA region!\n");
774 savage_do_cleanup_bci(dev);
779 DRM_ERROR("AGP command DMA region is not a "
781 savage_do_cleanup_bci(dev);
785 if (!dev_priv->cmd_dma->handle) {
786 DRM_ERROR("failed to ioremap command "
788 savage_do_cleanup_bci(dev);
792 DRM_ERROR("PCI command DMA region is not a "
793 "_DRM_CONSISTENT map!\n");
794 savage_do_cleanup_bci(dev);
803 DRM_DEBUG("falling back to faked command DMA.\n");
810 DRM_ERROR("could not allocate faked DMA buffer!\n");
811 savage_do_cleanup_bci(dev);
815 dev_priv->dma_flush = savage_fake_dma_flush;
824 unsigned int color_tile_format;
825 unsigned int depth_tile_format;
826 unsigned int front_stride, back_stride, depth_stride;
828 color_tile_format = dev_priv->fb_bpp == 16 ?
830 depth_tile_format = dev_priv->depth_bpp == 16 ?
867 dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
868 dev_priv->wait_evnt = savage_bci_wait_event_shadow;
873 dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
875 dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
877 dev_priv->wait_evnt = savage_bci_wait_event_reg;
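/*
 * Hedged reading of the assignments above: the wait_fifo/wait_evnt hooks are
 * chosen once at init time -- shadow-status variants when a shadow status
 * page is in use, otherwise register-polling variants picked per chip family
 * (S3D vs. S4) together with the generic register-based event wait.
 */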
886 if (savage_freelist_init(dev) < 0) {
887 DRM_ERROR("could not initialize freelist\n");
888 savage_do_cleanup_bci(dev);
892 if (savage_dma_init(dev_priv) < 0) {
893 DRM_ERROR("could not initialize command DMA\n");
894 savage_do_cleanup_bci(dev);
901 static int savage_do_cleanup_bci(struct drm_device *dev)
913 dev->agp_buffer_map && dev->agp_buffer_map->handle) {
918 dev->agp_buffer_map = NULL;
926 static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
930 LOCK_TEST_WITH_RETURN(dev, file_priv);
932 switch (init->func) {
933 case SAVAGE_INIT_BCI:
934 return savage_do_init_bci(dev, init);
935 case SAVAGE_CLEANUP_BCI:
936 return savage_do_cleanup_bci(dev);
942 static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
949 LOCK_TEST_WITH_RETURN(dev, file_priv);
957 static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
961 unsigned int event_e, hw_e;
962 unsigned int event_w, hw_w;
975 event_e = event->count & 0xffff;
976 event_w = event->count >> 16;
982 if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
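/*
 * A minimal sketch (illustration only; the helper name is made up): the
 * 32-bit event cookie appears to pack a software wrap count in the upper
 * 16 bits and the hardware event number in the lower 16 bits, so a wait can
 * return immediately when the requested event is not ahead of the last
 * completed (wrap, event) pair:
 */
static inline int savage_event_already_done(unsigned int count,
					    unsigned int hw_e, unsigned int hw_w)
{
	unsigned int e = count & 0xffff;	/* hardware event number */
	unsigned int w = count >> 16;		/* software wrap counter */

	return w < hw_w || (w == hw_w && e <= hw_e);
}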
985 return dev_priv->wait_evnt(dev_priv, event_e);
992 static int savage_bci_get_buffers(struct drm_device *dev,
993 struct drm_file *file_priv,
1000 buf = savage_freelist_get(dev);
1004 buf->file_priv = file_priv;
1007 &buf->idx, sizeof(buf->idx)))
1010 &buf->total, sizeof(buf->total)))
1020 struct drm_device_dma *dma = dev->dma;
1024 LOCK_TEST_WITH_RETURN(dev, file_priv);
1029 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1037 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1045 ret = savage_bci_get_buffers(dev, file_priv, d);
1053 struct drm_device_dma *dma = dev->dma;
1055 int release_idlelock = 0;
1065 if (file_priv->master && file_priv->master->lock.hw_lock) {
1067 release_idlelock = 1;
1070 for (i = 0; i < dma->buf_count; i++) {
1071 struct drm_buf *buf = dma->buflist[i];
1074 if (buf->file_priv == file_priv && buf_priv &&
1077 DRM_DEBUG("reclaimed from client\n");
1084 if (release_idlelock)
1089 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1091 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
1092 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
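/*
 * Hedged note on the ioctl table: BCI init/cleanup is gated on
 * DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, while event emit/wait only require an
 * authenticated client (DRM_AUTH).
 */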