/* drivers/gpu/drm/nouveau/nouveau_sgdma.c */
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
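
/*
 * Per-object state for the scatter/gather DMA ("GART") TTM backend.
 * One of these is allocated per TTM object by nouveau_sgdma_init_ttm();
 * the chipset-specific ttm_backend_func tables below all share it.
 */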
struct nouveau_sgdma_be {
        struct ttm_backend backend;
        struct drm_device *dev;

        dma_addr_t *pages;
        unsigned nr_pages;
        bool unmap_pages;

        u64 offset;
        bool bound;
};
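
/*
 * TTM "populate" hook, shared by all chipsets: DMA-map the object's
 * backing pages with pci_map_page() and remember the bus addresses so
 * the per-chipset bind() hooks can write them into the GART page table.
 */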
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                       struct page **pages, struct page *dummy_read_page,
                       dma_addr_t *dma_addrs)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        int i;

        NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

        nvbe->pages = dma_addrs;
        nvbe->nr_pages = num_pages;
        nvbe->unmap_pages = true;

        /* this code path isn't called and is incorrect anyways */
        if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
                nvbe->unmap_pages = false;
                return 0;
        }

        for (i = 0; i < num_pages; i++) {
                nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
                        nvbe->nr_pages = --i;
                        be->func->clear(be);
                        return -EFAULT;
                }
        }

        return 0;
}

static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;

        if (nvbe->bound)
                be->func->unbind(be);

        if (nvbe->unmap_pages) {
                while (nvbe->nr_pages--) {
                        pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
        }
}

static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

        if (be) {
                NV_DEBUG(nvbe->dev, "\n");

                if (nvbe) {
                        if (nvbe->pages)
                                be->func->clear(be);
                        kfree(nvbe);
                }
        }
}
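
/*
 * NV04-NV40 "PDMA" backend: the GART is an NV_CLASS_DMA_IN_MEMORY
 * context DMA object whose page table starts two 32-bit words past the
 * object header (hence the "+ 2" below).  Each PTE covers one 4KiB
 * NV_CTXDMA_PAGE, with its low bits set to mark the page present.
 */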
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = nvbe->pages[i];
                uint32_t offset_l = lower_32_bits(dma_offset);

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
                        nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
                        offset_l += NV_CTXDMA_PAGE_SIZE;
                }
        }

        nvbe->bound = true;
        return 0;
}

static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "\n");

        if (!nvbe->bound)
                return 0;

        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                        nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
        }

        nvbe->bound = false;
        return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv04_sgdma_bind,
        .unbind   = nv04_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};
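
/*
 * NV41-style PCIe backend: one 32-bit PTE per 4KiB page, written as
 * (bus address >> 7) with bit 0 set as the valid bit.  nv41_sgdma_flush()
 * pokes 0x100810 to flush the VM after the page table is modified.
 */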
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100810, 0x00000022);
        if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
                NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100810));
        nv_wr32(dev, 0x100810, 0x00000000);
}

static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->pages;
        u32 pte = mem->start << 2;
        u32 cnt = nvbe->nr_pages;

        nvbe->offset = mem->start << PAGE_SHIFT;

        while (cnt--) {
                nv_wo32(pgt, pte, (*list++ >> 7) | 1);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        nvbe->bound = true;
        return 0;
}

static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = nvbe->nr_pages;

        while (cnt--) {
                nv_wo32(pgt, pte, 0x00000000);
                pte += 4;
        }

        nv41_sgdma_flush(nvbe);
        nvbe->bound = false;
        return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv41_sgdma_bind,
        .unbind   = nv41_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};
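
/*
 * NV44 backend: PTEs are 27-bit page frame numbers packed four to a
 * 16-byte-aligned group, so partial groups need a read-modify-write
 * (nv44_sgdma_fill()).  The hardware has no per-PTE present bit, so
 * unused entries are pointed at a scratch "dummy" page instead.
 */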
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
        struct drm_device *dev = nvbe->dev;

        nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
        nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
        if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
                NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
                         nv_rd32(dev, 0x100808));
        nv_wr32(dev, 0x100808, 0x00000000);
}

static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
        struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
        dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
        u32 pte, tmp[4];

        pte = base >> 2;
        base &= ~0x0000000f;

        /* read-modify-write the 16-byte group containing the target PTEs */
        tmp[0] = nv_ro32(pgt, base + 0x0);
        tmp[1] = nv_ro32(pgt, base + 0x4);
        tmp[2] = nv_ro32(pgt, base + 0x8);
        tmp[3] = nv_ro32(pgt, base + 0xc);
        while (cnt--) {
                u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
                switch (pte++ & 0x3) {
                case 0:
                        tmp[0] &= ~0x07ffffff;
                        tmp[0] |= addr;
                        break;
                case 1:
                        tmp[0] &= ~0xf8000000;
                        tmp[0] |= addr << 27;
                        tmp[1] &= ~0x003fffff;
                        tmp[1] |= addr >> 5;
                        break;
                case 2:
                        tmp[1] &= ~0xffc00000;
                        tmp[1] |= addr << 22;
                        tmp[2] &= ~0x0001ffff;
                        tmp[2] |= addr >> 10;
                        break;
                case 3:
                        tmp[2] &= ~0xfffe0000;
                        tmp[2] |= addr << 17;
                        tmp[3] &= ~0x00000fff;
                        tmp[3] |= addr >> 15;
                        break;
                }
        }

        tmp[3] |= 0x40000000;

        nv_wo32(pgt, base + 0x0, tmp[0]);
        nv_wo32(pgt, base + 0x4, tmp[1]);
        nv_wo32(pgt, base + 0x8, tmp[2]);
        nv_wo32(pgt, base + 0xc, tmp[3]);
}

static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        dma_addr_t *list = nvbe->pages;
        u32 pte = mem->start << 2, tmp[4];
        u32 cnt = nvbe->nr_pages;
        int i;

        nvbe->offset = mem->start << PAGE_SHIFT;

        /* leading PTEs that don't start on a 16-byte group boundary */
        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, list, pte, part);
                pte  += (part << 2);
                list += part;
                cnt  -= part;
        }

        /* whole groups can be written without reading back first */
        while (cnt >= 4) {
                for (i = 0; i < 4; i++)
                        tmp[i] = *list++ >> 12;
                nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
                nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
                nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
                nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
                pte += 0x10;
                cnt -= 4;
        }

        /* trailing partial group */
        if (cnt)
                nv44_sgdma_fill(pgt, list, pte, cnt);

        nv44_sgdma_flush(nvbe);
        nvbe->bound = true;
        return 0;
}

static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
        u32 pte = (nvbe->offset >> 12) << 2;
        u32 cnt = nvbe->nr_pages;

        if (pte & 0x0000000c) {
                u32  max = 4 - ((pte >> 2) & 0x3);
                u32 part = (cnt > max) ? max : cnt;
                nv44_sgdma_fill(pgt, NULL, pte, part);
                pte += (part << 2);
                cnt -= part;
        }

        while (cnt >= 4) {
                nv_wo32(pgt, pte + 0x0, 0x00000000);
                nv_wo32(pgt, pte + 0x4, 0x00000000);
                nv_wo32(pgt, pte + 0x8, 0x00000000);
                nv_wo32(pgt, pte + 0xc, 0x00000000);
                pte += 0x10;
                cnt -= 4;
        }

        if (cnt)
                nv44_sgdma_fill(pgt, NULL, pte, cnt);

        nv44_sgdma_flush(nvbe);
        nvbe->bound = false;
        return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv44_sgdma_bind,
        .unbind   = nv44_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};
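
/*
 * NV50+ backend: the real page table updates are done by the VM code
 * from move_notify(), so bind/unbind only exchange the DMA address list
 * with the nouveau_mem node describing the placement.
 */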
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct nouveau_mem *node = mem->mm_node;

        /* noop: bound in move_notify() */
        node->pages = nvbe->pages;
        nvbe->pages = (dma_addr_t *)node;
        nvbe->bound = true;
        return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;

        /* noop: unbound in move_notify() */
        nvbe->pages = node->pages;
        node->pages = NULL;
        nvbe->bound = false;
        return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
        .populate = nouveau_sgdma_populate,
        .clear    = nouveau_sgdma_clear,
        .bind     = nv50_sgdma_bind,
        .unbind   = nv50_sgdma_unbind,
        .destroy  = nouveau_sgdma_destroy
};
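
/*
 * Allocate the per-object backend.  Which ttm_backend_func table it
 * uses is decided once at load time by nouveau_sgdma_init() below,
 * based on card generation and bus type.
 */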
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_sgdma_be *nvbe;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;

        nvbe->backend.func = dev_priv->gart_info.func;
        return &nvbe->backend;
}

int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        u32 aper_size, align;
        int ret;

        if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
                aper_size = 512 * 1024 * 1024;
        else
                aper_size = 64 * 1024 * 1024;

        /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
         * christmas.  The cards before it have them, the cards after
         * it have them, why is NV44 so unloved?
         */
        dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
        if (!dev_priv->gart_info.dummy.page)
                return -ENOMEM;

        dev_priv->gart_info.dummy.addr =
                pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
                             0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
                NV_ERROR(dev, "error mapping dummy page\n");
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
                return -ENOMEM;
        }

        if (dev_priv->card_type >= NV_50) {
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
                dev_priv->gart_info.func = &nv50_sgdma_backend;
        } else
        if (0 && pci_is_pcie(dev->pdev) &&
            dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
                if (nv44_graph_class(dev)) {
                        dev_priv->gart_info.func = &nv44_sgdma_backend;
                        align = 512 * 1024;
                } else {
                        dev_priv->gart_info.func = &nv41_sgdma_backend;
                        align = 16;
                }

                ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_HW;
        } else {
                ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                   (1 << 12) /* PT present */ |
                                   (0 << 13) /* PT *not* linear */ |
                                   (0 << 14) /* RW */ |
                                   (2 << 16) /* PCI */);
                nv_wo32(gpuobj, 4, aper_size - 1);

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
                dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
                dev_priv->gart_info.func = &nv04_sgdma_backend;
        }

        return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

        if (dev_priv->gart_info.dummy.page) {
                pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                __free_page(dev_priv->gart_info.dummy.page);
                dev_priv->gart_info.dummy.page = NULL;
        }
}
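
/*
 * Translate a GART offset back to the bus address of the backing page
 * by reading the NV04 ctxdma PTE.  Only meaningful for the pre-NV50
 * PDMA-style GART, hence the BUG_ON.
 */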
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

        BUG_ON(dev_priv->card_type >= NV_50);

        return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
                (offset & NV_CTXDMA_PAGE_MASK);
}