/* drivers/gpu/drm/nouveau/nouveau_sgdma.c */
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
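
/*
 * Per-object TTM backend state: the DMA address of every backing page,
 * a flag per page recording whether the address came pre-mapped from TTM
 * (ttm_alloced) or was mapped here, and the GART offset once bound.
 */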
struct nouveau_sgdma_be {
        struct ttm_backend backend;
        struct drm_device *dev;

        dma_addr_t *pages;
        bool *ttm_alloced;
        unsigned nr_pages;

        u64 offset;
        bool bound;
};
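
/*
 * Record a DMA address for every page of the object.  Addresses already
 * supplied by TTM (dma_addrs[] != DMA_ERROR_CODE) are reused and flagged
 * in ttm_alloced[]; everything else is mapped with pci_map_page() here
 * and must be unmapped again by nouveau_sgdma_clear().
 */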
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                       struct page **pages, struct page *dummy_read_page,
                       dma_addr_t *dma_addrs)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;

        NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

        if (nvbe->pages)
                return -EINVAL;

        nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
        if (!nvbe->pages)
                return -ENOMEM;

        nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
        if (!nvbe->ttm_alloced) {
                kfree(nvbe->pages);
                nvbe->pages = NULL;
                return -ENOMEM;
        }

        nvbe->nr_pages = 0;
        while (num_pages--) {
                if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
                        nvbe->pages[nvbe->nr_pages] =
                                dma_addrs[nvbe->nr_pages];
                        nvbe->ttm_alloced[nvbe->nr_pages] = true;
                } else {
                        nvbe->pages[nvbe->nr_pages] =
                                pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
                                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(dev->pdev,
                                                  nvbe->pages[nvbe->nr_pages])) {
                                be->func->clear(be);
                                return -EFAULT;
                        }
                        /* mapped locally, so clear() must unmap it */
                        nvbe->ttm_alloced[nvbe->nr_pages] = false;
                }

                nvbe->nr_pages++;
        }

        return 0;
}
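
/*
 * Undo populate: unbind if still bound, unmap only the pages this backend
 * mapped itself, then free the bookkeeping arrays.
 */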
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev;

        if (nvbe && nvbe->pages) {
                dev = nvbe->dev;
                NV_DEBUG(dev, "\n");

                if (nvbe->bound)
                        be->func->unbind(be);

                while (nvbe->nr_pages--) {
                        if (!nvbe->ttm_alloced[nvbe->nr_pages])
                                pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
                kfree(nvbe->pages);
                kfree(nvbe->ttm_alloced);
                nvbe->pages = NULL;
                nvbe->ttm_alloced = NULL;
                nvbe->nr_pages = 0;
        }
}
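
/*
 * Pre-NV50 bind: write one 32-bit PTE per NV_CTXDMA_PAGE_SIZE chunk of the
 * object into the SGDMA ctxdma's page table.  The "+ 2" skips the two
 * 32-bit header words at the start of the ctxdma object (see
 * nouveau_sgdma_init()); each entry is the page's bus address ORed with 3.
 */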
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

        nvbe->offset = mem->start << PAGE_SHIFT;
        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = nvbe->pages[i];
                uint32_t offset_l = lower_32_bits(dma_offset);

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
                        nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
                        dma_offset += NV_CTXDMA_PAGE_SIZE;
                }
        }

        nvbe->bound = true;
        return 0;
}
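
/*
 * Pre-NV50 unbind: clear the PTEs written by nouveau_sgdma_bind().
 */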
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "\n");

        if (!nvbe->bound)
                return 0;

        pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
        for (i = 0; i < nvbe->nr_pages; i++) {
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
                        nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
        }

        nvbe->bound = false;
        return 0;
}
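
/*
 * Final teardown of a backend instance: clear any remaining mappings and
 * free the nouveau_sgdma_be itself.
 */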
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

        if (be) {
                NV_DEBUG(nvbe->dev, "\n");

                if (nvbe) {
                        if (nvbe->pages)
                                be->func->clear(be);
                        kfree(nvbe);
                }
        }
}
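
/*
 * NV50+ bind: the GART is backed by GPU virtual memory, so the page list
 * is simply mapped into the reserved VM window at the requested offset.
 */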
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

        nvbe->offset = mem->start << PAGE_SHIFT;

        nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
                          nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
        nvbe->bound = true;
        return 0;
}
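
/*
 * NV50+ unbind: drop the VM mapping established by nv50_sgdma_bind().
 */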
static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

        if (!nvbe->bound)
                return 0;

        nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
                            nvbe->nr_pages << PAGE_SHIFT);
        nvbe->bound = false;
        return 0;
}
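
/*
 * The two ttm_backend_func tables share populate/clear/destroy; only the
 * bind/unbind hooks differ between the pre-NV50 ctxdma path and the
 * NV50+ virtual-memory path.
 */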
static struct ttm_backend_func nouveau_sgdma_backend = {
        .populate               = nouveau_sgdma_populate,
        .clear                  = nouveau_sgdma_clear,
        .bind                   = nouveau_sgdma_bind,
        .unbind                 = nouveau_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
};
static struct ttm_backend_func nv50_sgdma_backend = {
        .populate               = nouveau_sgdma_populate,
        .clear                  = nouveau_sgdma_clear,
        .bind                   = nv50_sgdma_bind,
        .unbind                 = nv50_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
};
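
/*
 * Allocate a backend instance for one TTM object and pick the function
 * table that matches the card generation.
 */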
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_sgdma_be *nvbe;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;

        if (dev_priv->card_type < NV_50)
                nvbe->backend.func = &nouveau_sgdma_backend;
        else
                nvbe->backend.func = &nv50_sgdma_backend;
        return &nvbe->backend;
}
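
/*
 * Per-device setup.  Pre-NV50: build the NV_CLASS_DMA_IN_MEMORY ctxdma
 * object whose page table the bind/unbind hooks fill in, sizing the
 * aperture at 64MiB or 512MiB depending on reserved instance memory.
 * NV50+: reserve a 512MiB window in the channel VM instead.
 */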
int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        uint32_t aper_size, obj_size;
        int i, ret;

        if (dev_priv->card_type < NV_50) {
                if (dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
                        aper_size = 64 * 1024 * 1024;
                else
                        aper_size = 512 * 1024 * 1024;

                obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
                obj_size += 8; /* ctxdma header */

                ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC |
                                         NVOBJ_FLAG_ZERO_FREE, &gpuobj);
                if (ret) {
                        NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                        return ret;
                }

                nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                   (1 << 12) /* PT present */ |
                                   (0 << 13) /* PT *not* linear */ |
                                   (0 << 14) /* RW */ |
                                   (2 << 16) /* PCI */);
                nv_wo32(gpuobj, 4, aper_size - 1);
                for (i = 2; i < 2 + (aper_size >> 12); i++)
                        nv_wo32(gpuobj, i * 4, 0x00000000);

                dev_priv->gart_info.sg_ctxdma = gpuobj;
                dev_priv->gart_info.aper_base = 0;
                dev_priv->gart_info.aper_size = aper_size;
        } else
        if (dev_priv->chan_vm) {
                ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
                                     12, NV_MEM_ACCESS_RW,
                                     &dev_priv->gart_info.vma);
                if (ret)
                        return ret;

                dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
                dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
        }

        dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
        return 0;
}
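
/*
 * Per-device teardown: drop the ctxdma reference and/or the VM window set
 * up by nouveau_sgdma_init().
 */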
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
        nouveau_vm_put(&dev_priv->gart_info.vma);
}
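
/*
 * Translate a GART offset back to the bus address stored in the ctxdma
 * page table (pre-NV50 only).
 */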
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

        BUG_ON(dev_priv->card_type >= NV_50);

        return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
               (offset & NV_CTXDMA_PAGE_MASK);
}