Linux Kernel 3.7.1
savage_bci.c

/* savage_bci.c -- BCI support for Savage
 *
 * Copyright 2004 Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/savage_drm.h>
#include "savage_drv.h"

/* Need a long timeout: shadow status updates can take a while,
 * and so can waiting for events when the queue is full. */
#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000	/* 1s */
#define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
#define SAVAGE_FREELIST_DEBUG		0

static int savage_do_cleanup_bci(struct drm_device *dev);

static int
savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
{
        uint32_t mask = dev_priv->status_used_mask;
        uint32_t threshold = dev_priv->bci_threshold_lo;
        uint32_t status;
        int i;

#if SAVAGE_BCI_DEBUG
        if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
                DRM_ERROR("Trying to emit %d words "
                          "(more than guaranteed space in COB)\n", n);
#endif

        for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
                DRM_MEMORYBARRIER();
                status = dev_priv->status_ptr[0];
                if ((status & mask) < threshold)
                        return 0;
                DRM_UDELAY(1);
        }

#if SAVAGE_BCI_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO("  status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
        return -EBUSY;
}

static int
savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
{
        uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
        uint32_t status;
        int i;

        for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
                status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
                if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
                        return 0;
                DRM_UDELAY(1);
        }

#if SAVAGE_BCI_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO("  status=0x%08x\n", status);
#endif
        return -EBUSY;
}

static int
savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
{
        uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
        uint32_t status;
        int i;

        for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
                status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
                if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
                        return 0;
                DRM_UDELAY(1);
        }

#if SAVAGE_BCI_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO("  status=0x%08x\n", status);
#endif
        return -EBUSY;
}

/*
 * Waiting for events.
 *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
 * never emit 0 to the event tag. If we find a 0 event tag we know the
 * BIOS stomped on it and return success assuming that the BIOS waited
 * for engine idle.
 *
 * Note: if the Xserver uses the event tag it has to follow the same
 * rule. Otherwise there may be glitches every 2^16 events.
 */
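/*
 * Editorial note: the tag comparison below is 16-bit modular
 * arithmetic. For example, waiting for e = 0xfff0 when the hardware
 * tag has wrapped to 0x0005 gives
 *   ((0x0005 - 0xfff0) & 0xffff) = 0x0015 <= 0x7fff,
 * so the event counts as passed; a tag of 0xffef gives 0xffff, so we
 * keep waiting. Events up to 2^15 - 1 tags ahead are thus "pending".
 */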
static int
savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
{
        uint32_t status;
        int i;

        for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
                DRM_MEMORYBARRIER();
                status = dev_priv->status_ptr[1];
                if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
                    (status & 0xffff) == 0)
                        return 0;
                DRM_UDELAY(1);
        }

#if SAVAGE_BCI_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO("  status=0x%08x, e=0x%04x\n", status, e);
#endif

        return -EBUSY;
}

static int
savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
{
        uint32_t status;
        int i;

        for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
                status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
                if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
                    (status & 0xffff) == 0)
                        return 0;
                DRM_UDELAY(1);
        }

#if SAVAGE_BCI_DEBUG
        DRM_ERROR("failed!\n");
        DRM_INFO("  status=0x%08x, e=0x%04x\n", status, e);
#endif

        return -EBUSY;
}

uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
                               unsigned int flags)
{
        uint16_t count;
        BCI_LOCALS;

        if (dev_priv->status_ptr) {
                /* coordinate with Xserver */
                count = dev_priv->status_ptr[1023];
                if (count < dev_priv->event_counter)
                        dev_priv->event_wrap++;
        } else {
                count = dev_priv->event_counter;
        }
        count = (count + 1) & 0xffff;
        if (count == 0) {
                count++;	/* See the comment above savage_wait_event_*. */
                dev_priv->event_wrap++;
        }
        dev_priv->event_counter = count;
        if (dev_priv->status_ptr)
                dev_priv->status_ptr[1023] = (uint32_t) count;

        if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
                unsigned int wait_cmd = BCI_CMD_WAIT;
                if ((flags & SAVAGE_WAIT_2D))
                        wait_cmd |= BCI_CMD_WAIT_2D;
                if ((flags & SAVAGE_WAIT_3D))
                        wait_cmd |= BCI_CMD_WAIT_3D;
                BEGIN_BCI(2);
                BCI_WRITE(wait_cmd);
        } else {
                BEGIN_BCI(1);
        }
        BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);

        return count;
}

/*
 * Freelist management
 */
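/*
 * Editorial note: the freelist is a doubly-linked list framed by two
 * buffer-less sentinel entries embedded in dev_priv (head and tail).
 * Fresh buffers are pushed at the head; reclaim candidates are taken
 * from just before the tail, i.e. the least recently used end.
 */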
static int savage_freelist_init(struct drm_device * dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf;
        drm_savage_buf_priv_t *entry;
        int i;
        DRM_DEBUG("count=%d\n", dma->buf_count);

        dev_priv->head.next = &dev_priv->tail;
        dev_priv->head.prev = NULL;
        dev_priv->head.buf = NULL;

        dev_priv->tail.next = NULL;
        dev_priv->tail.prev = &dev_priv->head;
        dev_priv->tail.buf = NULL;

        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[i];
                entry = buf->dev_private;

                SET_AGE(&entry->age, 0, 0);
                entry->buf = buf;

                entry->next = dev_priv->head.next;
                entry->prev = &dev_priv->head;
                dev_priv->head.next->prev = entry;
                dev_priv->head.next = entry;
        }

        return 0;
}

static struct drm_buf *savage_freelist_get(struct drm_device * dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
        uint16_t event;
        unsigned int wrap;
        DRM_DEBUG("\n");

        UPDATE_EVENT_COUNTER();
        if (dev_priv->status_ptr)
                event = dev_priv->status_ptr[1] & 0xffff;
        else
                event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
        wrap = dev_priv->event_wrap;
        if (event > dev_priv->event_counter)
                wrap--;	/* hardware hasn't passed the last wrap yet */

        DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
        DRM_DEBUG("   head=0x%04x %d\n", event, wrap);

        if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
                drm_savage_buf_priv_t *next = tail->next;
                drm_savage_buf_priv_t *prev = tail->prev;
                prev->next = next;
                next->prev = prev;
                tail->next = tail->prev = NULL;
                return tail->buf;
        }

        DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
        return NULL;
}

void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;

        DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);

        if (entry->next != NULL || entry->prev != NULL) {
                DRM_ERROR("entry already on freelist.\n");
                return;
        }

        prev = &dev_priv->head;
        next = prev->next;
        prev->next = entry;
        next->prev = entry;
        entry->prev = prev;
        entry->next = next;
}

/*
 * Command DMA
 */
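/*
 * Editorial note: the command DMA buffer is carved into fixed-size
 * pages. Each page carries an "age" (event tag plus wrap count); a
 * page may be reused only once the hardware has passed the event
 * emitted when the page was last flushed. first_dma_page and
 * current_dma_page delimit the range still holding unflushed data.
 */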
static int savage_dma_init(drm_savage_private_t * dev_priv)
{
        unsigned int i;

        dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
            (SAVAGE_DMA_PAGE_SIZE * 4);
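        /*
         * Editorial note: SAVAGE_DMA_PAGE_SIZE counts 32-bit words
         * (it is used for uint32_t pointer arithmetic below), while
         * cmd_dma->size is in bytes, hence the factor of 4. E.g. a
         * 1 MiB region with 1024-word (4 KiB) pages would yield
         * 1048576 / 4096 = 256 pages (numbers purely illustrative).
         */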
        dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) *
            dev_priv->nr_dma_pages, GFP_KERNEL);
        if (dev_priv->dma_pages == NULL)
                return -ENOMEM;

        for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
                SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
                dev_priv->dma_pages[i].used = 0;
                dev_priv->dma_pages[i].flushed = 0;
        }
        SET_AGE(&dev_priv->last_dma_age, 0, 0);

        dev_priv->first_dma_page = 0;
        dev_priv->current_dma_page = 0;

        return 0;
}

void savage_dma_reset(drm_savage_private_t * dev_priv)
{
        uint16_t event;
        unsigned int wrap, i;
        event = savage_bci_emit_event(dev_priv, 0);
        wrap = dev_priv->event_wrap;
        for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
                SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
                dev_priv->dma_pages[i].used = 0;
                dev_priv->dma_pages[i].flushed = 0;
        }
        SET_AGE(&dev_priv->last_dma_age, event, wrap);
        dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
{
        uint16_t event;
        unsigned int wrap;

        /* Faked DMA buffer pages don't age. */
        if (dev_priv->cmd_dma == &dev_priv->fake_dma)
                return;

        UPDATE_EVENT_COUNTER();
        if (dev_priv->status_ptr)
                event = dev_priv->status_ptr[1] & 0xffff;
        else
                event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
        wrap = dev_priv->event_wrap;
        if (event > dev_priv->event_counter)
                wrap--;	/* hardware hasn't passed the last wrap yet */

        if (dev_priv->dma_pages[page].age.wrap > wrap ||
            (dev_priv->dma_pages[page].age.wrap == wrap &&
             dev_priv->dma_pages[page].age.event > event)) {
                if (dev_priv->wait_evnt(dev_priv,
                                        dev_priv->dma_pages[page].age.event)
                    < 0)
                        DRM_ERROR("wait_evnt failed!\n");
        }
}
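/*
 * Editorial note: savage_dma_wait() above compares (wrap, event)
 * pairs lexicographically: a page is still busy if it was last used
 * in a later wrap-around period than the hardware has reached, or in
 * the same period but with a higher event tag.
 */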

uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
{
        unsigned int cur = dev_priv->current_dma_page;
        unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
            dev_priv->dma_pages[cur].used;
        unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
            SAVAGE_DMA_PAGE_SIZE;
        uint32_t *dma_ptr;
        unsigned int i;

        DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
                  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

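        /*
         * Editorial note: two cases follow. If the request fits in
         * the pages remaining after current_dma_page, allocation
         * continues in place; otherwise everything pending is flushed
         * and allocation restarts at page 0, which is why nr_pages is
         * recomputed without the partial "rest" of the current page.
         */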
        if (cur + nr_pages < dev_priv->nr_dma_pages) {
                dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
                    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
                if (n < rest)
                        rest = n;
                dev_priv->dma_pages[cur].used += rest;
                n -= rest;
                cur++;
        } else {
                dev_priv->dma_flush(dev_priv);
                nr_pages =
                    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
                for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
                        dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
                        dev_priv->dma_pages[i].used = 0;
                        dev_priv->dma_pages[i].flushed = 0;
                }
                dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
                dev_priv->first_dma_page = cur = 0;
        }
        for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
                if (dev_priv->dma_pages[i].used) {
                        DRM_ERROR("unflushed page %u: used=%u\n",
                                  i, dev_priv->dma_pages[i].used);
                }
#endif
                if (n > SAVAGE_DMA_PAGE_SIZE)
                        dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
                else
                        dev_priv->dma_pages[i].used = n;
                n -= SAVAGE_DMA_PAGE_SIZE;
        }
        dev_priv->current_dma_page = --i;

        DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
                  i, dev_priv->dma_pages[i].used, n);

        savage_dma_wait(dev_priv, dev_priv->current_dma_page);

        return dma_ptr;
}

static void savage_dma_flush(drm_savage_private_t * dev_priv)
{
        unsigned int first = dev_priv->first_dma_page;
        unsigned int cur = dev_priv->current_dma_page;
        uint16_t event;
        unsigned int wrap, pad, align, len, i;
        unsigned long phys_addr;
        BCI_LOCALS;

        if (first == cur &&
            dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
                return;

        /* pad length to multiples of 2 entries
         * align start of next DMA block to multiples of 8 entries */
        pad = -dev_priv->dma_pages[cur].used & 1;
        align = -(dev_priv->dma_pages[cur].used + pad) & 7;
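        /*
         * Editorial note: in two's complement, -x & (2^k - 1) is the
         * amount needed to round x up to the next multiple of 2^k.
         * E.g. used = 13: pad = -13 & 1 = 1 (round up to 14), then
         * align = -14 & 7 = 2 (round up to 16, a multiple of 8).
         */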

        DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
                  "pad=%u, align=%u\n",
                  first, cur, dev_priv->dma_pages[first].flushed,
                  dev_priv->dma_pages[cur].used, pad, align);

        /* pad with noops */
        if (pad) {
                uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
                    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
                dev_priv->dma_pages[cur].used += pad;
                while (pad != 0) {
                        *dma_ptr++ = BCI_CMD_WAIT;
                        pad--;
                }
        }

        DRM_MEMORYBARRIER();

        /* do flush ... */
        phys_addr = dev_priv->cmd_dma->offset +
            (first * SAVAGE_DMA_PAGE_SIZE +
             dev_priv->dma_pages[first].flushed) * 4;
        len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
            dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

        DRM_DEBUG("phys_addr=%lx, len=%u\n",
                  phys_addr | dev_priv->dma_type, len);

        BEGIN_BCI(3);
        BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
        BCI_WRITE(phys_addr | dev_priv->dma_type);
        BCI_DMA(len);

        /* fix alignment of the start of the next block */
        dev_priv->dma_pages[cur].used += align;

        /* age DMA pages */
        event = savage_bci_emit_event(dev_priv, 0);
        wrap = dev_priv->event_wrap;
        for (i = first; i < cur; ++i) {
                SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
                dev_priv->dma_pages[i].used = 0;
                dev_priv->dma_pages[i].flushed = 0;
        }
        /* age the current page only when it's full */
        if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
                SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
                dev_priv->dma_pages[cur].used = 0;
                dev_priv->dma_pages[cur].flushed = 0;
                /* advance to next page */
                cur++;
                if (cur == dev_priv->nr_dma_pages)
                        cur = 0;
                dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
        } else {
                dev_priv->first_dma_page = cur;
                dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
        }
        SET_AGE(&dev_priv->last_dma_age, event, wrap);

        DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
                  dev_priv->dma_pages[cur].used,
                  dev_priv->dma_pages[cur].flushed);
}

static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
{
        unsigned int i, j;
        BCI_LOCALS;

        if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
            dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
                return;

        DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
                  dev_priv->first_dma_page, dev_priv->current_dma_page,
                  dev_priv->dma_pages[dev_priv->current_dma_page].used);

        for (i = dev_priv->first_dma_page;
             i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
             ++i) {
                uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
                    i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
                /* Sanity check: all pages except the last one must be full. */
                if (i < dev_priv->current_dma_page &&
                    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
                        DRM_ERROR("partial DMA page %u: used=%u",
                                  i, dev_priv->dma_pages[i].used);
                }
#endif
                BEGIN_BCI(dev_priv->dma_pages[i].used);
                for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
                        BCI_WRITE(dma_ptr[j]);
                }
                dev_priv->dma_pages[i].used = 0;
        }

        /* reset to first page */
        dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
        drm_savage_private_t *dev_priv;

        dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;

        dev_priv->chipset = (enum savage_family)chipset;

        pci_set_master(dev->pdev);

        return 0;
}


/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture is not suitable for automatic MTRR setup
 * in drm_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
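/*
 * Editorial note: MTRRs must cover power-of-two-sized, size-aligned
 * ranges, so the Savage3D path below needs three write-combining
 * entries (16 MiB, 32 MiB and 64 MiB) to cover the 128 MiB BAR while
 * leaving a hole at fb_base + 16 MiB, where the MMIO registers live
 * and must not be write-combined.
 */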
int savage_driver_firstopen(struct drm_device *dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        unsigned long mmio_base, fb_base, fb_size, aperture_base;
        /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
         * in case we decide we need information on the BAR for BSD in the
         * future.
         */
        unsigned int fb_rsrc, aper_rsrc;
        int ret = 0;

        dev_priv->mtrr[0].handle = -1;
        dev_priv->mtrr[1].handle = -1;
        dev_priv->mtrr[2].handle = -1;
        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                fb_rsrc = 0;
                fb_base = pci_resource_start(dev->pdev, 0);
                fb_size = SAVAGE_FB_SIZE_S3;
                mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
                aper_rsrc = 0;
                aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
                /* this should always be true */
                if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
                        /* Don't make MMIO write-combining! We need 3
                         * MTRRs. */
                        dev_priv->mtrr[0].base = fb_base;
                        dev_priv->mtrr[0].size = 0x01000000;
                        dev_priv->mtrr[0].handle =
                            drm_mtrr_add(dev_priv->mtrr[0].base,
                                         dev_priv->mtrr[0].size, DRM_MTRR_WC);
                        dev_priv->mtrr[1].base = fb_base + 0x02000000;
                        dev_priv->mtrr[1].size = 0x02000000;
                        dev_priv->mtrr[1].handle =
                            drm_mtrr_add(dev_priv->mtrr[1].base,
                                         dev_priv->mtrr[1].size, DRM_MTRR_WC);
                        dev_priv->mtrr[2].base = fb_base + 0x04000000;
                        dev_priv->mtrr[2].size = 0x04000000;
                        dev_priv->mtrr[2].handle =
                            drm_mtrr_add(dev_priv->mtrr[2].base,
                                         dev_priv->mtrr[2].size, DRM_MTRR_WC);
                } else {
                        DRM_ERROR("strange pci_resource_len %08llx\n",
                                  (unsigned long long)
                                  pci_resource_len(dev->pdev, 0));
                }
        } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
                   dev_priv->chipset != S3_SAVAGE2000) {
                mmio_base = pci_resource_start(dev->pdev, 0);
                fb_rsrc = 1;
                fb_base = pci_resource_start(dev->pdev, 1);
                fb_size = SAVAGE_FB_SIZE_S4;
                aper_rsrc = 1;
                aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
                /* this should always be true */
                if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
                        /* Can use one MTRR to cover both fb and
                         * aperture. */
                        dev_priv->mtrr[0].base = fb_base;
                        dev_priv->mtrr[0].size = 0x08000000;
                        dev_priv->mtrr[0].handle =
                            drm_mtrr_add(dev_priv->mtrr[0].base,
                                         dev_priv->mtrr[0].size, DRM_MTRR_WC);
                } else {
                        DRM_ERROR("strange pci_resource_len %08llx\n",
                                  (unsigned long long)
                                  pci_resource_len(dev->pdev, 1));
                }
        } else {
                mmio_base = pci_resource_start(dev->pdev, 0);
                fb_rsrc = 1;
                fb_base = pci_resource_start(dev->pdev, 1);
                fb_size = pci_resource_len(dev->pdev, 1);
                aper_rsrc = 2;
                aperture_base = pci_resource_start(dev->pdev, 2);
                /* Automatic MTRR setup will do the right thing. */
        }

        ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
                         _DRM_READ_ONLY, &dev_priv->mmio);
        if (ret)
                return ret;

        ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
                         _DRM_WRITE_COMBINING, &dev_priv->fb);
        if (ret)
                return ret;

        ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
                         _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
                         &dev_priv->aperture);
        return ret;
}

/*
 * Delete MTRRs and free device-private data.
 */
void savage_driver_lastclose(struct drm_device *dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        int i;

        for (i = 0; i < 3; ++i)
                if (dev_priv->mtrr[i].handle >= 0)
                        drm_mtrr_del(dev_priv->mtrr[i].handle,
                                     dev_priv->mtrr[i].base,
                                     dev_priv->mtrr[i].size, DRM_MTRR_WC);
}

int savage_driver_unload(struct drm_device *dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;

        kfree(dev_priv);

        return 0;
}

static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
{
        drm_savage_private_t *dev_priv = dev->dev_private;

        if (init->fb_bpp != 16 && init->fb_bpp != 32) {
                DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
                return -EINVAL;
        }
        if (init->depth_bpp != 16 && init->depth_bpp != 32) {
                DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp);
                return -EINVAL;
        }
        if (init->dma_type != SAVAGE_DMA_AGP &&
            init->dma_type != SAVAGE_DMA_PCI) {
                DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
                return -EINVAL;
        }

        dev_priv->cob_size = init->cob_size;
        dev_priv->bci_threshold_lo = init->bci_threshold_lo;
        dev_priv->bci_threshold_hi = init->bci_threshold_hi;
        dev_priv->dma_type = init->dma_type;

        dev_priv->fb_bpp = init->fb_bpp;
        dev_priv->front_offset = init->front_offset;
        dev_priv->front_pitch = init->front_pitch;
        dev_priv->back_offset = init->back_offset;
        dev_priv->back_pitch = init->back_pitch;
        dev_priv->depth_bpp = init->depth_bpp;
        dev_priv->depth_offset = init->depth_offset;
        dev_priv->depth_pitch = init->depth_pitch;

        dev_priv->texture_offset = init->texture_offset;
        dev_priv->texture_size = init->texture_size;

        dev_priv->sarea = drm_getsarea(dev);
        if (!dev_priv->sarea) {
                DRM_ERROR("could not find sarea!\n");
                savage_do_cleanup_bci(dev);
                return -EINVAL;
        }
        if (init->status_offset != 0) {
                dev_priv->status = drm_core_findmap(dev, init->status_offset);
                if (!dev_priv->status) {
                        DRM_ERROR("could not find shadow status region!\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
        } else {
                dev_priv->status = NULL;
        }
        if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
                dev->agp_buffer_token = init->buffers_offset;
                dev->agp_buffer_map = drm_core_findmap(dev,
                                                       init->buffers_offset);
                if (!dev->agp_buffer_map) {
                        DRM_ERROR("could not find DMA buffer region!\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
                drm_core_ioremap(dev->agp_buffer_map, dev);
                if (!dev->agp_buffer_map->handle) {
                        DRM_ERROR("failed to ioremap DMA buffer region!\n");
                        savage_do_cleanup_bci(dev);
                        return -ENOMEM;
                }
        }
        if (init->agp_textures_offset) {
                dev_priv->agp_textures =
                    drm_core_findmap(dev, init->agp_textures_offset);
                if (!dev_priv->agp_textures) {
                        DRM_ERROR("could not find agp texture region!\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
        } else {
                dev_priv->agp_textures = NULL;
        }

        if (init->cmd_dma_offset) {
                if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                        DRM_ERROR("command DMA not supported on "
                                  "Savage3D/MX/IX.\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
                if (dev->dma && dev->dma->buflist) {
                        DRM_ERROR("command and vertex DMA not supported "
                                  "at the same time.\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
                dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
                if (!dev_priv->cmd_dma) {
                        DRM_ERROR("could not find command DMA region!\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
                if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
                        if (dev_priv->cmd_dma->type != _DRM_AGP) {
                                DRM_ERROR("AGP command DMA region is not a "
                                          "_DRM_AGP map!\n");
                                savage_do_cleanup_bci(dev);
                                return -EINVAL;
                        }
                        drm_core_ioremap(dev_priv->cmd_dma, dev);
                        if (!dev_priv->cmd_dma->handle) {
                                DRM_ERROR("failed to ioremap command "
                                          "DMA region!\n");
                                savage_do_cleanup_bci(dev);
                                return -ENOMEM;
                        }
                } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
                        DRM_ERROR("PCI command DMA region is not a "
                                  "_DRM_CONSISTENT map!\n");
                        savage_do_cleanup_bci(dev);
                        return -EINVAL;
                }
        } else {
                dev_priv->cmd_dma = NULL;
        }

        dev_priv->dma_flush = savage_dma_flush;
        if (!dev_priv->cmd_dma) {
                DRM_DEBUG("falling back to faked command DMA.\n");
                dev_priv->fake_dma.offset = 0;
                dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
                dev_priv->fake_dma.type = _DRM_SHM;
                dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE,
                                                    GFP_KERNEL);
                if (!dev_priv->fake_dma.handle) {
                        DRM_ERROR("could not allocate faked DMA buffer!\n");
                        savage_do_cleanup_bci(dev);
                        return -ENOMEM;
                }
                dev_priv->cmd_dma = &dev_priv->fake_dma;
                dev_priv->dma_flush = savage_fake_dma_flush;
        }

        dev_priv->sarea_priv =
            (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
                                    init->sarea_priv_offset);

        /* setup bitmap descriptors */
        {
                unsigned int color_tile_format;
                unsigned int depth_tile_format;
                unsigned int front_stride, back_stride, depth_stride;
                if (dev_priv->chipset <= S3_SAVAGE4) {
                        color_tile_format = dev_priv->fb_bpp == 16 ?
                            SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
                        depth_tile_format = dev_priv->depth_bpp == 16 ?
                            SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
                } else {
                        color_tile_format = SAVAGE_BD_TILE_DEST;
                        depth_tile_format = SAVAGE_BD_TILE_DEST;
                }
                front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
                back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
                depth_stride =
                    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);

                dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
                    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
                    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

                dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
                    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
                    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

                dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
                    (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
                    (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
        }

        /* setup status and bci ptr */
        dev_priv->event_counter = 0;
        dev_priv->event_wrap = 0;
        dev_priv->bci_ptr = (volatile uint32_t *)
            ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
        } else {
                dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
        }
        if (dev_priv->status != NULL) {
                dev_priv->status_ptr =
                    (volatile uint32_t *)dev_priv->status->handle;
                dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
                dev_priv->wait_evnt = savage_bci_wait_event_shadow;
                dev_priv->status_ptr[1023] = dev_priv->event_counter;
        } else {
                dev_priv->status_ptr = NULL;
                if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                        dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
                } else {
                        dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
                }
                dev_priv->wait_evnt = savage_bci_wait_event_reg;
        }

        /* cliprect functions */
        if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
                dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
        else
                dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;

        if (savage_freelist_init(dev) < 0) {
                DRM_ERROR("could not initialize freelist\n");
                savage_do_cleanup_bci(dev);
                return -ENOMEM;
        }

        if (savage_dma_init(dev_priv) < 0) {
                DRM_ERROR("could not initialize command DMA\n");
                savage_do_cleanup_bci(dev);
                return -ENOMEM;
        }

        return 0;
}

static int savage_do_cleanup_bci(struct drm_device * dev)
{
        drm_savage_private_t *dev_priv = dev->dev_private;

        if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
                kfree(dev_priv->fake_dma.handle);
        } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
                   dev_priv->cmd_dma->type == _DRM_AGP &&
                   dev_priv->dma_type == SAVAGE_DMA_AGP)
                drm_core_ioremapfree(dev_priv->cmd_dma, dev);

        if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
            dev->agp_buffer_map && dev->agp_buffer_map->handle) {
                drm_core_ioremapfree(dev->agp_buffer_map, dev);
                /* make sure the next instance (which may be running
                 * in PCI mode) doesn't try to use an old
                 * agp_buffer_map. */
                dev->agp_buffer_map = NULL;
        }

        kfree(dev_priv->dma_pages);

        return 0;
}

static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_savage_init_t *init = data;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        switch (init->func) {
        case SAVAGE_INIT_BCI:
                return savage_do_init_bci(dev, init);
        case SAVAGE_CLEANUP_BCI:
                return savage_do_cleanup_bci(dev);
        }

        return -EINVAL;
}

static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        drm_savage_event_emit_t *event = data;

        DRM_DEBUG("\n");

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        event->count = savage_bci_emit_event(dev_priv, event->flags);
        event->count |= dev_priv->event_wrap << 16;

        return 0;
}

static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_savage_private_t *dev_priv = dev->dev_private;
        drm_savage_event_wait_t *event = data;
        unsigned int event_e, hw_e;
        unsigned int event_w, hw_w;

        DRM_DEBUG("\n");

        UPDATE_EVENT_COUNTER();
        if (dev_priv->status_ptr)
                hw_e = dev_priv->status_ptr[1] & 0xffff;
        else
                hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
        hw_w = dev_priv->event_wrap;
        if (hw_e > dev_priv->event_counter)
                hw_w--;	/* hardware hasn't passed the last wrap yet */

        event_e = event->count & 0xffff;
        event_w = event->count >> 16;

        /* Don't need to wait if
         * - event counter wrapped since the event was emitted or
         * - the hardware has advanced up to or over the event to wait for.
         */
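        /*
         * Editorial example: an event emitted as count 0x0002fff0
         * (wrap 2, tag 0xfff0) needs no wait once the hardware is at
         * wrap 3, or at wrap 2 with a tag of at least 0xfff0.
         */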
        if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
                return 0;
        else
                return dev_priv->wait_evnt(dev_priv, event_e);
}

/*
 * DMA buffer management
 */

static int savage_bci_get_buffers(struct drm_device *dev,
                                  struct drm_file *file_priv,
                                  struct drm_dma *d)
{
        struct drm_buf *buf;
        int i;

        for (i = d->granted_count; i < d->request_count; i++) {
                buf = savage_freelist_get(dev);
                if (!buf)
                        return -EAGAIN;

                buf->file_priv = file_priv;

                if (DRM_COPY_TO_USER(&d->request_indices[i],
                                     &buf->idx, sizeof(buf->idx)))
                        return -EFAULT;
                if (DRM_COPY_TO_USER(&d->request_sizes[i],
                                     &buf->total, sizeof(buf->total)))
                        return -EFAULT;

                d->granted_count++;
        }
        return 0;
}

int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_dma *d = data;
        int ret = 0;

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        /* Please don't send us buffers.
         */
        if (d->send_count != 0) {
                DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
                          DRM_CURRENTPID, d->send_count);
                return -EINVAL;
        }

        /* We'll send you buffers.
         */
        if (d->request_count < 0 || d->request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          DRM_CURRENTPID, d->request_count, dma->buf_count);
                return -EINVAL;
        }

        d->granted_count = 0;

        if (d->request_count) {
                ret = savage_bci_get_buffers(dev, file_priv, d);
        }

        return ret;
}

void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        drm_savage_private_t *dev_priv = dev->dev_private;
        int release_idlelock = 0;
        int i;

        if (!dma)
                return;
        if (!dev_priv)
                return;
        if (!dma->buflist)
                return;

        if (file_priv->master && file_priv->master->lock.hw_lock) {
                drm_idlelock_take(&file_priv->master->lock);
                release_idlelock = 1;
        }

        for (i = 0; i < dma->buf_count; i++) {
                struct drm_buf *buf = dma->buflist[i];
                drm_savage_buf_priv_t *buf_priv = buf->dev_private;

                if (buf->file_priv == file_priv && buf_priv &&
                    buf_priv->next == NULL && buf_priv->prev == NULL) {
                        uint16_t event;
                        DRM_DEBUG("reclaimed from client\n");
                        event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
                        SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
                        savage_freelist_put(dev, buf);
                }
        }

        if (release_idlelock)
                drm_idlelock_release(&file_priv->master->lock);
}

struct drm_ioctl_desc savage_ioctls[] = {
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};

int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);