Linux Kernel 3.7.1
nouveau_dma.c
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <core/client.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"

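/* Copy 'nr_dwords' words of command data into the push buffer at the
 * current position and advance chan->dma.cur past them. the buffer may
 * be mapped as I/O memory (VRAM) or as system RAM, so the appropriate
 * copy routine is chosen at runtime.
 */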
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
	uint64_t val;

	val = nv_ro32(chan->object, chan->user_get);
	if (chan->user_get_hi)
		val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;

	/* reset counter as long as GET is still advancing, this is
	 * to avoid misdetecting a GPU lockup if the GPU happens to
	 * just be processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0) {
		udelay(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->push.vma.offset ||
	    val > chan->push.vma.offset + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->push.vma.offset) >> 2;
}

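/* Queue an indirect-buffer (IB) entry pointing at an external push
 * buffer. each IB slot holds two dwords: the low 32 bits of the
 * buffer's GPU virtual address, then the high bits or'd with the
 * length shifted left by 8. writing the new put index to the channel
 * (offset 0x8c) lets PFIFO fetch the entry.
 */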
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
	      int delta, int length)
{
	struct nouveau_bo *pb = chan->push.buffer;
	struct nouveau_vma *vma;
	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
	u64 offset;

	vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
	BUG_ON(!vma);
	offset = vma->offset + delta;

	BUG_ON(chan->dma.ib_free < 1);

	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

	DRM_MEMORYBARRIER();
	/* Flush writes. */
	nouveau_bo_rd32(pb, 0);

	nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
	chan->dma.ib_free--;
}

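/* Spin until at least 'count' IB slots are free, reading the
 * channel's IB GET index (offset 0x88) and resetting the timeout
 * counter for as long as it is still advancing.
 */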
static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (chan->dma.ib_free < count) {
		uint32_t get = nv_ro32(chan->object, 0x88);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		chan->dma.ib_free = get - chan->dma.ib_put;
		if (chan->dma.ib_free <= 0)
			chan->dma.ib_free += chan->dma.ib_max;
	}

	return 0;
}

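/* IB-mode variant of nouveau_dma_wait() below: needs both free IB
 * entries and 'count' dwords in the main push buffer. one extra IB
 * slot is requested, presumably to leave room for the submission
 * that wraps the ring back to the start.
 */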
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
	uint64_t prev_get = 0;
	int ret, cnt = 0;

	ret = nv50_dma_push_wait(chan, slots + 1);
	if (unlikely(ret))
		return ret;

	while (chan->dma.free < count) {
		int get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get < 0)) {
			if (get == -EINVAL)
				continue;

			return get;
		}

		if (get <= chan->dma.cur) {
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= count)
				break;

			FIRE_RING(chan);
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get < 0)) {
					if (get == -EINVAL)
						continue;
					return get;
				}
			} while (get == 0);
			chan->dma.cur = 0;
			chan->dma.put = 0;
		}

		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}

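/* Ensure there is space for 'size' dwords of commands ('slots' IB
 * entries on channels using an indirect buffer). on older channels
 * the GPU is instructed to jump back to the start of the ring when
 * the space remaining at the end is insufficient.
 */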
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
	uint64_t prev_get = 0;
	int cnt = 0, get;

	if (chan->dma.ib_max)
		return nv50_dma_wait(chan, slots, size);

	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* loop until we have a usable GET pointer. the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring;
		 * discard these values until something sensible is seen.
		 *
		 * the other case we discard GET is while the GPU is fetching
		 * from the SKIPS area, so the code below doesn't have to deal
		 * with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT) so we have free space up until
			 * the end of the push buffer
			 *
			 * we can only hit that path once per call due to
			 * looping back to the beginning of the push buffer;
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET==PUT, in which case the below conditional will
			 * always succeed and break us out of the wait loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push buffer,
			 * instruct the GPU to jump back to the start right
			 * after processing the currently pending commands.
			 */
			OUT_RING(chan, chan->push.vma.offset | 0x20000000);

			/* wait for GET to depart from the skips area.
			 * prevents writing GET==PUT and causing a race
			 * condition that causes us to think the GPU is
			 * idle when it's not.
			 */
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get == -EBUSY))
					return -EBUSY;
				if (unlikely(get == -EINVAL))
					continue;
			} while (get <= NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur =
			chan->dma.put = NOUVEAU_DMA_SKIPS;
		}

		/* engine fetching ahead of us, we have space up until the
		 * current GET pointer. the "- 1" is to ensure there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it. we can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}