drm/nouveau: allocate kernel's notifier object at end of block
drivers/gpu/drm/nouveau/nouveau_dma.c
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
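
/* nouveau_dma_pre_init() below sets up the software view of the push
 * buffer.  On NV50+ the pushbuf BO is split in half: the bottom half
 * holds raw command data, the top half holds the indirect buffer (IB)
 * ring of 8-byte entries.  Illustrative arithmetic, assuming a 64KiB
 * pushbuf (a size not taken from this file): ib_size = 32768 bytes, so
 * ib_base = (65536 - 32768) >> 2 = 8192 (a dword index) and
 * ib_max = (32768 / 8) - 1 = 4095 entries.
 */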
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_bo *pushbuf = chan->pushbuf_bo;

        if (dev_priv->card_type >= NV_50) {
                const int ib_size = pushbuf->bo.mem.size / 2;

                chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
                chan->dma.ib_max = (ib_size / 8) - 1;
                chan->dma.ib_put = 0;
                chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;

                chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
        } else {
                chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
        }

        chan->dma.put = 0;
        chan->dma.cur = chan->dma.put;
        chan->dma.free = chan->dma.max - chan->dma.cur;
}
int
nouveau_dma_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret, i;

        if (dev_priv->card_type >= NV_C0) {
                ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
                if (ret)
                        return ret;

                ret = RING_SPACE(chan, 2);
                if (ret)
                        return ret;

                BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
                OUT_RING  (chan, 0x00009039);
                FIRE_RING (chan);
                return 0;
        }

        /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
        ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
                                    0x0039 : 0x5039);
        if (ret)
                return ret;

        /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
        ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
                                     &chan->m2mf_ntfy);
        if (ret)
                return ret;

        /* Insert NOPS for NOUVEAU_DMA_SKIPS */
        ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
        if (ret)
                return ret;

        for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
                OUT_RING(chan, 0);

        /* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
        ret = RING_SPACE(chan, 4);
        if (ret)
                return ret;
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
        OUT_RING(chan, NvM2MF);
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
        OUT_RING(chan, NvNotify0);

        /* Sit back and pray the channel works.. */
        FIRE_RING(chan);

        return 0;
}
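
/* Typical caller pattern (an illustrative sketch mirroring the init code
 * above, not additional code from this file): every submission reserves
 * ring space first, then emits a method header plus data, then kicks the
 * ring so the GPU sees the new PUT pointer:
 *
 *      ret = RING_SPACE(chan, 2);
 *      if (ret)
 *              return ret;
 *      BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
 *      OUT_RING(chan, NvM2MF);
 *      FIRE_RING(chan);
 */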
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
        mem = &mem[chan->dma.cur];
        if (is_iomem)
                memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
        else
                memcpy(mem, data, nr_dwords * 4);
        chan->dma.cur += nr_dwords;
}
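
/* OUT_RINGp copies blindly: it performs no bounds checking of its own
 * and only advances dma.cur, so the caller must already have reserved
 * at least nr_dwords of space via RING_SPACE() / nouveau_dma_wait().
 */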
/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
{
        uint32_t val;

        val = nvchan_rd32(chan, chan->user_get);

        /* reset counter as long as GET is still advancing, this is
         * to avoid misdetecting a GPU lockup if the GPU happens to
         * just be processing an operation that takes a long time
         */
        if (val != *prev_get) {
                *prev_get = val;
                *timeout = 0;
        }

        if ((++*timeout & 0xff) == 0) {
                DRM_UDELAY(1);
                if (*timeout > 100000)
                        return -EBUSY;
        }

        if (val < chan->pushbuf_base ||
            val > chan->pushbuf_base + (chan->dma.max << 2))
                return -EINVAL;

        return (val - chan->pushbuf_base) >> 2;
}
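
/* Worked example of the conversion above (illustrative values): with
 * pushbuf_base = 0x1000 and a raw GET of 0x1040, READ_GET() returns
 * (0x1040 - 0x1000) >> 2 = 0x10, i.e. GET expressed as a dword index
 * into the push buffer, matching how dma.cur and dma.put are kept.
 */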
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
              int delta, int length)
{
        struct nouveau_bo *pb = chan->pushbuf_bo;
        uint64_t offset = bo->bo.offset + delta;
        int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;

        BUG_ON(chan->dma.ib_free < 1);
        nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
        nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

        chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

        DRM_MEMORYBARRIER();
        /* Flush writes. */
        nouveau_bo_rd32(pb, 0);

        nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
        chan->dma.ib_free--;
}
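
/* Worked example of the IB entry encoding above (illustrative numbers):
 * offset = 0x120000000, length = 0x100 yields the two dwords
 * 0x20000000 (lower_32_bits) and 0x00010001 (upper_32_bits | length << 8).
 * The updated ib_put is then written to channel offset 0x8c, which is
 * evidently the IB PUT register, so the GPU picks up the new entry.
 */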
static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
        uint32_t cnt = 0, prev_get = 0;

        while (chan->dma.ib_free < count) {
                uint32_t get = nvchan_rd32(chan, 0x88);
                if (get != prev_get) {
                        prev_get = get;
                        cnt = 0;
                }

                if ((++cnt & 0xff) == 0) {
                        DRM_UDELAY(1);
                        if (cnt > 100000)
                                return -EBUSY;
                }

                chan->dma.ib_free = get - chan->dma.ib_put;
                if (chan->dma.ib_free <= 0)
                        chan->dma.ib_free += chan->dma.ib_max;
        }

        return 0;
}
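
/* The free-space arithmetic above treats the IB as a ring: the free
 * slots are those between our PUT and the GPU's GET (read from 0x88),
 * and a non-positive difference means GET has wrapped, so ib_max is
 * added back.  E.g. (illustrative) with ib_max = 4095, ib_put = 4000
 * and get = 10, free = 10 - 4000 + 4095 = 105 entries.
 */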
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
        uint32_t cnt = 0, prev_get = 0;
        int ret;

        ret = nv50_dma_push_wait(chan, slots + 1);
        if (unlikely(ret))
                return ret;

        while (chan->dma.free < count) {
                int get = READ_GET(chan, &prev_get, &cnt);
                if (unlikely(get < 0)) {
                        if (get == -EINVAL)
                                continue;

                        return get;
                }

                if (get <= chan->dma.cur) {
                        chan->dma.free = chan->dma.max - chan->dma.cur;
                        if (chan->dma.free >= count)
                                break;

                        FIRE_RING(chan);
                        do {
                                get = READ_GET(chan, &prev_get, &cnt);
                                if (unlikely(get < 0)) {
                                        if (get == -EINVAL)
                                                continue;
                                        return get;
                                }
                        } while (get == 0);
                        chan->dma.cur = 0;
                        chan->dma.put = 0;
                }

                chan->dma.free = get - chan->dma.cur - 1;
        }

        return 0;
}
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
        uint32_t prev_get = 0, cnt = 0;
        int get;

        if (chan->dma.ib_max)
                return nv50_dma_wait(chan, slots, size);

        while (chan->dma.free < size) {
                get = READ_GET(chan, &prev_get, &cnt);
                if (unlikely(get == -EBUSY))
                        return -EBUSY;

                /* loop until we have a usable GET pointer.  the value
                 * we read from the GPU may be outside the main ring if
                 * PFIFO is processing a buffer called from the main ring,
                 * discard these values until something sensible is seen.
                 *
                 * the other case we discard GET is while the GPU is fetching
                 * from the SKIPS area, so the code below doesn't have to deal
                 * with some fun corner cases.
                 */
                if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
                        continue;

                if (get <= chan->dma.cur) {
                        /* engine is fetching behind us, or is completely
                         * idle (GET == PUT) so we have free space up until
                         * the end of the push buffer
                         *
                         * we can only hit that path once per call due to
                         * looping back to the beginning of the push buffer,
                         * we'll hit the fetching-ahead-of-us path from that
                         * point on.
                         *
                         * the *one* exception to that rule is if we read
                         * GET==PUT, in which case the below conditional will
                         * always succeed and break us out of the wait loop.
                         */
                        chan->dma.free = chan->dma.max - chan->dma.cur;
                        if (chan->dma.free >= size)
                                break;

                        /* not enough space left at the end of the push buffer,
                         * instruct the GPU to jump back to the start right
                         * after processing the currently pending commands.
                         */
                        OUT_RING(chan, chan->pushbuf_base | 0x20000000);

                        /* wait for GET to depart from the skips area.
                         * prevents writing GET==PUT and causing a race
                         * condition that causes us to think the GPU is
                         * idle when it's not.
                         */
                        do {
                                get = READ_GET(chan, &prev_get, &cnt);
                                if (unlikely(get == -EBUSY))
                                        return -EBUSY;
                                if (unlikely(get == -EINVAL))
                                        continue;
                        } while (get <= NOUVEAU_DMA_SKIPS);
                        WRITE_PUT(NOUVEAU_DMA_SKIPS);

                        /* we're now submitting commands at the start of
                         * the push buffer.
                         */
                        chan->dma.cur =
                        chan->dma.put = NOUVEAU_DMA_SKIPS;
                }

                /* engine fetching ahead of us, we have space up until the
                 * current GET pointer.  the "- 1" is to ensure there's
                 * space left to emit a jump back to the beginning of the
                 * push buffer if we require it.  we can never get GET == PUT
                 * here, so this is safe.
                 */
                chan->dma.free = get - chan->dma.cur - 1;
        }

        return 0;
}