/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32. The first CARD32 contains
   the handle, the second one a bitfield, that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4   (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id and
   is given as:
*/
static uint32_t
nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t hash = 0;
	int i;

	NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);

	for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
		hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
		handle >>= dev_priv->ramht_bits;
	}

	if (dev_priv->card_type < NV_50)
		hash ^= channel << (dev_priv->ramht_bits - 4);
	hash <<= 3;

	NV_DEBUG(dev, "hash=0x%08x\n", hash);
	return hash;
}
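
/* Worked example (illustrative only; it assumes ramht_bits == 9, which would
 * correspond to a 4KiB RAMHT holding 512 eight-byte entries): for handle
 * 0x00000010 on channel 1 of a pre-NV50 card, the 9-bit folds give 0x010,
 * XORing in (1 << (9 - 4)) gives 0x030, and the final shift by 3 yields byte
 * offset 0x180 for the entry within RAMHT.
 */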
static int
nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
			  uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);

	if (dev_priv->card_type < NV_40)
		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
	return (ctx != 0);
}
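
/* The RAMHT is handled as an open-addressed hash table: on a collision the
 * insert and remove paths below probe forward one 8-byte entry at a time,
 * wrapping at dev_priv->ramht_size, until a usable (or matching) slot is
 * found or the probe arrives back at the original hash position.
 */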
static int
nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t ctx, co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return -EINVAL;
	}

	if (dev_priv->card_type < NV_40) {
		ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else
	if (dev_priv->card_type < NV_50) {
		ctx = (ref->instance >> 4) |
		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else {
		if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
			ctx = (ref->instance << 10) | 2;
		} else {
			ctx = (ref->instance >> 4) |
			      ((ref->gpuobj->engine <<
				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
		}
	}

	instmem->prepare_access(dev, true);
	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
	do {
		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
			NV_DEBUG(dev,
				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle, ctx);
			nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
			nv_wo32(dev, ramht, (co + 4)/4, ctx);

			list_add_tail(&ref->list, &chan->ramht_refs);
			instmem->finish_access(dev);
			return 0;
		}
		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
			 chan->id, co, nv_ro32(dev, ramht, co/4));

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);
	instmem->finish_access(dev);

	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
	return -ENOMEM;
}
static void
nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return;
	}

	instmem->prepare_access(dev, true);
	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
	do {
		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
		    (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
			NV_DEBUG(dev,
				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle,
				 nv_ro32(dev, ramht, (co + 4)/4));
			nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);

			list_del(&ref->list);
			instmem->finish_access(dev);
			return;
		}

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);
	list_del(&ref->list);
	instmem->finish_access(dev);

	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
		 chan->id, ref->handle);
}
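
/* nouveau_gpuobj_new() carves a block out of a PRAMIN heap (the channel's
 * private heap when one exists, otherwise the global heap), has the instmem
 * engine populate and bind backing for global objects, and zero-fills the
 * allocation when NVOBJ_FLAG_ZERO_ALLOC is set.
 */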
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	struct mem_block *pramin = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->flags = flags;
	gpuobj->im_channel = chan;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	/* Choose between global instmem heap, and per-channel private
	 * instmem heap.  On <NV50 allow requests for private instmem
	 * to be satisfied from global heap if no per-channel area
	 * available.
	 */
	if (chan) {
		if (chan->ramin_heap) {
			NV_DEBUG(dev, "private heap\n");
			pramin = chan->ramin_heap;
		} else
		if (dev_priv->card_type < NV_50) {
			NV_DEBUG(dev, "global heap fallback\n");
			pramin = dev_priv->ramin_heap;
		}
	} else {
		NV_DEBUG(dev, "global heap\n");
		pramin = dev_priv->ramin_heap;
	}

	if (!pramin) {
		NV_ERROR(dev, "No PRAMIN heap!\n");
		return -EINVAL;
	}

	if (!chan) {
		ret = engine->instmem.populate(dev, gpuobj, &size);
		if (ret) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return ret;
		}
	}

	/* Allocate a chunk of the PRAMIN aperture */
	gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
						    drm_order(align),
						    (struct drm_file *)-2, 0);
	if (!gpuobj->im_pramin) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}

	if (!chan) {
		ret = engine->instmem.bind(dev, gpuobj);
		if (ret) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return ret;
		}
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		engine->instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		engine->instmem.finish_access(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}
int
nouveau_gpuobj_early_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	return 0;
}
int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_new_fake(dev,
			dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
			NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
			&dev_priv->ramht, NULL);
		if (ret)
			return ret;
	}

	return 0;
}
void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_del(dev, &dev_priv->ramht);
}
void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	struct list_head *entry, *tmp;

	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);

		NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
			 gpuobj, gpuobj->refcount);
		gpuobj->refcount = 0;
		nouveau_gpuobj_del(dev, &gpuobj);
	}
}
int
nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);

	if (!dev_priv || !pgpuobj || !(*pgpuobj))
		return -EINVAL;
	gpuobj = *pgpuobj;

	if (gpuobj->refcount != 0) {
		NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
		return -EINVAL;
	}

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		engine->instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		engine->instmem.finish_access(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin) {
		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
			kfree(gpuobj->im_pramin);
		else
			nouveau_mem_free_block(gpuobj->im_pramin);
	}

	list_del(&gpuobj->list);

	*pgpuobj = NULL;
	kfree(gpuobj);
	return 0;
}
static int
nouveau_gpuobj_instance_get(struct drm_device *dev,
			    struct nouveau_channel *chan,
			    struct nouveau_gpuobj *gpuobj, uint32_t *inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *cpramin;

	/* <NV50 use PRAMIN address everywhere */
	if (dev_priv->card_type < NV_50) {
		*inst = gpuobj->im_pramin->start;
		return 0;
	}

	if (chan && gpuobj->im_channel != chan) {
		NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
			 gpuobj->im_channel->id, chan->id);
		return -EINVAL;
	}

	/* NV50 channel-local instance */
	if (chan) {
		cpramin = chan->ramin->gpuobj;
		*inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
		return 0;
	}

	/* NV50 global (VRAM) instance */
	if (!gpuobj->im_channel) {
		/* ...from global heap */
		if (!gpuobj->im_backing) {
			NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
			return -EINVAL;
		}
		*inst = gpuobj->im_backing_start;
		return 0;
	} else {
		/* ...from local heap */
		cpramin = gpuobj->im_channel->ramin->gpuobj;
		*inst = cpramin->im_backing_start +
			(gpuobj->im_pramin->start - cpramin->im_pramin->start);
		return 0;
	}
}
int
nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
		       uint32_t handle, struct nouveau_gpuobj *gpuobj,
		       struct nouveau_gpuobj_ref **ref_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_ref *ref;
	uint32_t instance;
	int ret;

	NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
		 chan ? chan->id : -1, handle, gpuobj);

	if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
		return -EINVAL;

	if (!chan && !ref_ret)
		return -EINVAL;

	if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
		/* sw object */
		instance = 0x40;
	} else {
		ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
		if (ret)
			return ret;
	}

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	INIT_LIST_HEAD(&ref->list);
	ref->gpuobj = gpuobj;
	ref->channel = chan;
	ref->instance = instance;

	if (!ref_ret) {
		ref->handle = handle;

		ret = nouveau_ramht_insert(dev, ref);
		if (ret) {
			kfree(ref);
			return ret;
		}
	} else {
		ref->handle = ~0;
		*ref_ret = ref;
	}

	ref->gpuobj->refcount++;
	return 0;
}
int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
{
	struct nouveau_gpuobj_ref *ref;

	NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);

	if (!dev || !pref || *pref == NULL)
		return -EINVAL;
	ref = *pref;

	if (ref->handle != ~0)
		nouveau_ramht_remove(dev, ref);

	if (ref->gpuobj) {
		ref->gpuobj->refcount--;

		if (ref->gpuobj->refcount == 0) {
			if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
				nouveau_gpuobj_del(dev, &ref->gpuobj);
		}
	}

	*pref = NULL;
	kfree(ref);
	return 0;
}
int
nouveau_gpuobj_new_ref(struct drm_device *dev,
		       struct nouveau_channel *oc, struct nouveau_channel *rc,
		       uint32_t handle, uint32_t size, int align,
		       uint32_t flags, struct nouveau_gpuobj_ref **ref)
{
	struct nouveau_gpuobj *gpuobj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	return 0;
}
int
nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
			struct nouveau_gpuobj_ref **ref_ret)
{
	struct nouveau_gpuobj_ref *ref;
	struct list_head *entry, *tmp;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		if (ref->handle == handle) {
			if (ref_ret)
				*ref_ret = ref;
			return 0;
		}
	}

	return -EINVAL;
}
int
nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
			uint32_t b_offset, uint32_t size,
			uint32_t flags, struct nouveau_gpuobj **pgpuobj,
			struct nouveau_gpuobj_ref **pref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
		 p_offset, b_offset, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->im_channel = NULL;
	gpuobj->flags      = flags | NVOBJ_FLAG_FAKE;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	if (p_offset != ~0) {
		gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
					    GFP_KERNEL);
		if (!gpuobj->im_pramin) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return -ENOMEM;
		}
		gpuobj->im_pramin->start = p_offset;
		gpuobj->im_pramin->size  = size;
	}

	if (b_offset != ~0) {
		gpuobj->im_backing = (struct nouveau_bo *)-1;
		gpuobj->im_backing_start = b_offset;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		dev_priv->engine.instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		dev_priv->engine.instmem.finish_access(dev);
	}

	if (pref) {
		i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
		if (i) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return i;
		}
	}

	if (pgpuobj)
		*pgpuobj = gpuobj;
	return 0;
}
static int
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}
/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator, same value as the first pte, as does nvidia
   rivatv uses 0xffffffff

   Non linear page tables need a list of frame addresses afterwards,
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
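
/* For example, nouveau_gpuobj_channel_init() later in this file creates the
 * per-channel VRAM ctxdma on pre-NV50 hardware with a call along these lines
 * (an illustrative restatement of that call, not an additional code path):
 *
 *	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 *				     0, dev_priv->fb_available_size,
 *				     NV_DMA_ACCESS_RW,
 *				     NV_DMA_TARGET_VIDMEM, &vram);
 */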
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	instmem->prepare_access(dev, true);

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset &  0x00000fff;
		frame  = offset & ~0x00000fff;

		nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
					  (adjust << 20) |
					  (access << 14) |
					  (target << 16) |
					  class));
		nv_wo32(dev, *gpuobj, 1, size - 1);
		nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
		nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(dev, *gpuobj, 0, flags0 | class);
		nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
		nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
		nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
					  (upper_32_bits(offset) & 0xff));
		nv_wo32(dev, *gpuobj, 5, flags5);
	}

	instmem->finish_access(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class  = class;
	return 0;
}
int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		*gpuobj = dev_priv->gart_info.sg_ctxdma;
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}
/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0    class
   17:15   patch config:
	   scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18      synchronize enable
   19      endian: 1 big, 0 little
   23      single step enable
   24      patch status: 0 invalid, 1 valid
   25      context_surface 0: 1 valid
   26      context surface 1: 1 valid
   27      context pattern: 1 valid
   28      context rop: 1 valid
   29,30   context beta, beta4
   entry[1]
   31:16   notify instance address
   entry[2]
   15:0    dma 0 instance address
   31:16   dma 1 instance address

   NV40:
   No idea what the exact format is. Here's what can be deducted:

   entry[0]:
   11:0    class (maybe uses more bits here?)
   25      patch status valid ?
   entry[1]:
   15:0    DMA notifier (maybe 20:0)
   entry[2]:
   15:0    DMA 0 instance (maybe 20:0)
   entry[3]:
   15:0    DMA 1 instance (maybe 20:0)
*/
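
/* For illustration (a condensed restatement of the grobj_alloc ioctl handler
 * at the end of this file, not an extra code path): a graphics object is
 * normally created and then published in the channel's RAMHT under the
 * user-supplied handle roughly like this:
 *
 *	ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
 *	if (ret == 0)
 *		ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
 */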
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (dev_priv->card_type >= NV_50) {
		nv_wo32(dev, *gpuobj, 0, class);
		nv_wo32(dev, *gpuobj, 5, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(dev, *gpuobj, 0, 0x00001030);
			nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(dev, *gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(dev, *gpuobj, 2, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(dev, *gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class  = class;
	return 0;
}
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*gpuobj_ret = gpuobj;
	return 0;
}
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pramin = NULL;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
	}

	NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
		 chan->id, size, base);
	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
				     &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}
	pramin = chan->ramin->gpuobj;

	ret = nouveau_mem_init_heap(&chan->ramin_heap,
				    pramin->im_pramin->start + base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
		return ret;
	}

	return 0;
}
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	INIT_LIST_HEAD(&chan->ramht_refs);

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Reserve a block of PRAMIN for the channel
	 *XXX: maybe on <NV50 too at some point
	 */
	if (0 || dev_priv->card_type == NV_50) {
		ret = nouveau_gpuobj_channel_init_pramin(chan);
		if (ret) {
			NV_ERROR(dev, "init pramin\n");
			return ret;
		}
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		uint32_t vm_offset, pde;

		instmem->prepare_access(dev, true);

		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
		vm_offset += chan->ramin->gpuobj->im_pramin->start;

		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
					      0, &chan->vm_pd, NULL);
		if (ret) {
			instmem->finish_access(dev);
			return ret;
		}
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
		}

		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
					     dev_priv->gart_info.sg_ctxdma,
					     &chan->vm_gart_pt);
		if (ret) {
			instmem->finish_access(dev);
			return ret;
		}
		nv_wo32(dev, chan->vm_pd, pde++,
			chan->vm_gart_pt->instance | 0x03);
		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
						     dev_priv->vm_vram_pt[i],
						     &chan->vm_vram_pt[i]);
			if (ret) {
				instmem->finish_access(dev);
				return ret;
			}

			nv_wo32(dev, chan->vm_pd, pde++,
				chan->vm_vram_pt[i]->instance | 0x61);
			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
		}

		instmem->finish_access(dev);
	}

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
					     &chan->ramht);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
					     0x8000, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &chan->ramht);
		if (ret)
			return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		tt = vram;
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n",
			 dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
		return ret;
	}

	return 0;
}
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct list_head *entry, *tmp;
	struct nouveau_gpuobj_ref *ref;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht_refs.next)
		return;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		nouveau_gpuobj_ref_del(dev, &ref);
	}

	nouveau_gpuobj_ref_del(dev, &chan->ramht);

	nouveau_gpuobj_del(dev, &chan->vm_pd);
	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap)
		nouveau_mem_takedown(&chan->ramin_heap);
	if (chan->ramin)
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
}
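
/* Suspend/resume helpers: on pre-NV50 chipsets the reserved RAMIN area is
 * copied wholesale into a vmalloc()d buffer and written back on resume;
 * on NV50 each VRAM-backed (non-fake) gpuobj gets its own temporary copy.
 */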
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
		if (!dev_priv->susres.ramin_copy)
			return -ENOMEM;

		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
		return 0;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
			continue;

		gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
		if (!gpuobj->im_backing_suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		dev_priv->engine.instmem.prepare_access(dev, false);
		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
		dev_priv->engine.instmem.finish_access(dev);
	}

	return 0;
}
void
nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	if (dev_priv->card_type < NV_50) {
		vfree(dev_priv->susres.ramin_copy);
		dev_priv->susres.ramin_copy = NULL;
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		vfree(gpuobj->im_backing_suspend);
		gpuobj->im_backing_suspend = NULL;
	}
}
void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
		nouveau_gpuobj_suspend_cleanup(dev);
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		dev_priv->engine.instmem.prepare_access(dev, true);
		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
		dev_priv->engine.instmem.finish_access(dev);
	}

	nouveau_gpuobj_suspend_cleanup(dev);
}
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_pgraph_object_class *grc;
	struct nouveau_gpuobj *gr = NULL;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

	if (init->handle == ~0)
		return -EINVAL;

	grc = pgraph->grclass;
	while (grc->id) {
		if (grc->id == init->class)
			break;
		grc++;
	}

	if (!grc->id) {
		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
		return -EPERM;
	}

	if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
		return -EEXIST;

	if (!grc->software)
		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
	else
		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		nouveau_gpuobj_del(dev, &gr);
		return ret;
	}

	return 0;
}
int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_gpuobj_ref *ref;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

	ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
	if (ret)
		return ret;
	nouveau_gpuobj_ref_del(dev, &ref);

	return 0;
}