#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
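/*
 * Per-buffer TTM backend state: the bus address of every backing page,
 * whether TTM (rather than this file) created each mapping, and where in
 * the GART the pages are currently bound, if at all.
 */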
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;	/* bus address of each backing page */
	bool *ttm_alloced;	/* true if TTM supplied the mapping */
	unsigned nr_pages;

	u64 offset;		/* byte offset into the GART aperture */
	bool bound;
};
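/*
 * Record a bus address for each of the buffer's pages.  When TTM already
 * provides a valid DMA address (dma_addrs[i] != DMA_ERROR_CODE) it is
 * reused and flagged in ttm_alloced[]; otherwise the page is mapped here
 * with pci_map_page(), and clear() will be responsible for unmapping it.
 */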
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
	if (!nvbe->ttm_alloced) {
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		return -ENOMEM;
	}
	nvbe->nr_pages = 0;
	while (num_pages--) {
		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
			/* TTM already mapped this page for us. */
			nvbe->pages[nvbe->nr_pages] =
					dma_addrs[nvbe->nr_pages];
			nvbe->ttm_alloced[nvbe->nr_pages] = true;
		} else {
			nvbe->pages[nvbe->nr_pages] =
				pci_map_page(dev->pdev, pages[nvbe->nr_pages],
					     0, PAGE_SIZE,
					     PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev,
					nvbe->pages[nvbe->nr_pages])) {
				be->func->clear(be);
				return -EFAULT;
			}
		}

		nvbe->nr_pages++;
	}

	return 0;
}
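/*
 * Inverse of populate(): unbind if still bound, unmap only the pages this
 * file mapped itself (ttm_alloced[] entries remain TTM's to clean up),
 * then free both tracking arrays.
 */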
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			if (!nvbe->ttm_alloced[nvbe->nr_pages])
				pci_unmap_page(dev->pdev,
					       nvbe->pages[nvbe->nr_pages],
					       PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		kfree(nvbe->ttm_alloced);
		nvbe->pages = NULL;
		nvbe->ttm_alloced = NULL;
		nvbe->nr_pages = 0;
	}
}
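/*
 * Pre-NV50 bind: fill one 32-bit PTE per NV_CTXDMA_PAGE_SIZE chunk of
 * each CPU page into the sgdma ctxdma's page table.  "pte + 2" skips the
 * two-word ctxdma header; the "| 3" presumably sets present/valid flag
 * bits in low address bits a 4 KiB-aligned entry never uses.
 */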
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			/* recompute the low word per chunk, so entries stay
			 * correct when PAGE_SIZE > NV_CTXDMA_PAGE_SIZE */
			uint32_t offset_l = lower_32_bits(dma_offset);

			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}

	nvbe->bound = true;
	return 0;
}
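/*
 * Pre-NV50 unbind: zero the same PTE range that bind() filled in.
 */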
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}
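/*
 * Final teardown for one backend: release any remaining page mappings via
 * clear(), then free the backend object itself.
 */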
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (be) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe) {
			if (nvbe->pages)
				be->func->clear(be);
			kfree(nvbe);
		}
	}
}
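/*
 * NV50+ parts have a paged GPU VM, so binding is a scatter map into the
 * GART VMA reserved by nouveau_sgdma_init() rather than ctxdma PTE writes.
 */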
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	nvbe->offset = mem->start << PAGE_SHIFT;

	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
	nvbe->bound = true;
	return 0;
}
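/*
 * NV50+ unbind: tear down the VM mapping made by nv50_sgdma_bind().
 */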
static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	if (!nvbe->bound)
		return 0;

	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
			    nvbe->nr_pages << PAGE_SHIFT);
	nvbe->bound = false;
	return 0;
}
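/*
 * Both generations share populate/clear/destroy; only bind/unbind differ,
 * selected per card type in nouveau_sgdma_init_ttm() below.
 */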
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nouveau_sgdma_bind,
	.unbind			= nouveau_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};
static struct ttm_backend_func nv50_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};
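/*
 * Allocate a TTM backend and point it at the function table matching the
 * chipset generation.
 */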
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	if (dev_priv->card_type < NV_50)
		nvbe->backend.func = &nouveau_sgdma_backend;
	else
		nvbe->backend.func = &nv50_sgdma_backend;
	return &nvbe->backend;
}
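/*
 * Driver-load setup.  Pre-NV50: build the DMA-in-memory ctxdma whose page
 * table backs the GART aperture (64 MiB, or 512 MiB when enough instance
 * memory is reserved).  NV50+: reserve a 512 MiB window in the shared
 * channel VM instead.
 */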
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		if (dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
			aper_size = 64 * 1024 * 1024;
		else
			aper_size = 512 * 1024 * 1024;

		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */

		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++)
			nv_wo32(gpuobj, i * 4, 0x00000000);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
	} else if (dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
				     12, NV_MEM_ACCESS_RW,
				     &dev_priv->gart_info.vma);
		if (ret)
			return ret;

		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
	}

	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
	return 0;
}
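/*
 * Release whatever nouveau_sgdma_init() set up: the ctxdma reference on
 * pre-NV50, the GART VMA on NV50+.
 */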
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
	nouveau_vm_put(&dev_priv->gart_info.vma);
}
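/*
 * Translate a GART byte offset back to a bus address by reading the PTE
 * covering it and re-attaching the sub-page offset.  Pre-NV50 only, since
 * only those boards have the ctxdma page table to read from.
 */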
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}