/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
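/* Allocate, pin and map the channel's DMA push buffer, then create a
 * ctxdma spanning the entire memtype it lives in, since userspace may
 * submit its own push buffers from anywhere within that memtype.
 */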
static int
nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
{
	u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	/* allocate buffer object */
	ret = nouveau_bo_new(dev, NULL, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
	if (ret)
		goto out;

	ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
	if (ret)
		goto out;

	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret)
		goto out;

	/* create DMA object covering the entire memtype where the push
	 * buffer resides, userspace can submit its own push buffers from
	 * anywhere within the same memtype.
	 */
	chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
	if (dev_priv->card_type >= NV_50) {
		if (dev_priv->card_type < NV_C0) {
			ret = nouveau_gpuobj_dma_new(chan,
						     NV_CLASS_DMA_IN_MEMORY, 0,
						     (1ULL << 40),
						     NV_MEM_ACCESS_RO,
						     NV_MEM_TARGET_VM,
						     &chan->pushbuf);
		}
		chan->pushbuf_base = chan->pushbuf_bo->vma.offset;
	} else
	if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_GART,
					     &chan->pushbuf);
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_VRAM,
					     &chan->pushbuf);
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     pci_resource_start(dev->pdev, 1),
					     dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RO,
					     NV_MEM_TARGET_PCI,
					     &chan->pushbuf);
	}

out:
	if (ret) {
		NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->pushbuf);
		if (chan->pushbuf_bo) {
			nouveau_bo_unmap(chan->pushbuf_bo);
			nouveau_bo_ref(NULL, &chan->pushbuf_bo);
		}
	}

	return 0;
}
/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->nvsw.flip);
	INIT_LIST_HEAD(&chan->fence.pending);

	/* setup channel's memory and vm */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate DMA push buffer */
	ret = nouveau_channel_pushbuf_init(chan);
	if (ret) {
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_channel_init(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	spin_lock(&fpriv->lock);
	list_add(&chan->list, &fpriv->channels);
	spin_unlock(&fpriv->lock);

	*chan_ret = chan;
	return 0;
}
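/* Take an additional user reference on a channel without locking it.
 * Fails (returns NULL) if the channel is already on its way out, i.e.
 * its user count has dropped to zero.
 */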
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
	struct nouveau_channel *chan = NULL;

	if (likely(ref && atomic_inc_not_zero(&ref->users)))
		nouveau_channel_ref(ref, &chan);

	return chan;
}
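/* Look up a channel by id among those owned by file_priv, take a user
 * reference and return the channel with chan->mutex held.  Release with
 * nouveau_channel_put().  Typical usage (sketch):
 *
 *	chan = nouveau_channel_get(file_priv, id);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	nouveau_channel_put(&chan);
 */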
struct nouveau_channel *
nouveau_channel_get(struct drm_file *file_priv, int id)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;

	spin_lock(&fpriv->lock);
	list_for_each_entry(chan, &fpriv->channels, list) {
		if (chan->id == id) {
			chan = nouveau_channel_get_unlocked(chan);
			spin_unlock(&fpriv->lock);
			mutex_lock(&chan->mutex);
			return chan;
		}
	}
	spin_unlock(&fpriv->lock);

	return ERR_PTR(-EINVAL);
}
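/* Drop a user reference taken without holding chan->mutex.  The last
 * user reference tears the channel down: idle it, boot it off the
 * hardware, and release every resource it owns.
 */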
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int i;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it a chance to idle */
	nouveau_channel_idle(chan);

	/* ensure all outstanding fences are signaled.  they should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_channel_fini(chan);

	/* boot it off the hardware */
	pfifo->reassign(dev, false);

	/* destroy the engine specific contexts */
	pfifo->destroy_context(chan);
	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
		if (chan->engctx[i])
			dev_priv->eng[i]->context_del(chan, i);
	}

	pfifo->reassign(dev, true);

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_ramht_ref(NULL, &chan->ramht, chan);
	nouveau_notifier_takedown_channel(chan);
	nouveau_gpuobj_channel_takedown(chan);

	nouveau_channel_ref(NULL, pchan);
}
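/* Unlock the channel and drop the caller's user reference. */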
void
nouveau_channel_put(struct nouveau_channel **pchan)
{
	mutex_unlock(&(*pchan)->mutex);
	nouveau_channel_put_unlocked(pchan);
}
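/* Final kref release, called once the last nouveau_channel_ref() goes
 * away; frees the channel structure itself.
 */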
static void
nouveau_channel_del(struct kref *ref)
{
	struct nouveau_channel *chan =
		container_of(ref, struct nouveau_channel, ref);

	kfree(chan);
}
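/* Update a channel pointer while maintaining the underlying kref: takes
 * a reference on the new channel (if any) and drops the one previously
 * held through *pchan.  E.g. nouveau_channel_ref(NULL, &chan) releases
 * the reference held in 'chan' and leaves it NULL.
 */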
void
nouveau_channel_ref(struct nouveau_channel *chan,
		    struct nouveau_channel **pchan)
{
	if (chan)
		kref_get(&chan->ref);

	if (*pchan)
		kref_put(&(*pchan)->ref, nouveau_channel_del);

	*pchan = chan;
}
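/* Attempt to drain all outstanding work on a channel, emitting and
 * waiting on an extra fence if work is still pending.
 */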
void
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *fence = NULL;
	int ret;

	nouveau_fence_update(chan);

	if (chan->fence.sequence != chan->fence.sequence_ack) {
		ret = nouveau_fence_new(chan, &fence, true);
		if (!ret) {
			ret = nouveau_fence_wait(fence, false, false);
			nouveau_fence_unref(&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}
}
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_channel *chan;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		chan = nouveau_channel_get(file_priv, i);
		if (IS_ERR(chan))
			continue;

		list_del(&chan->list);
		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}
/***********************************
 * ioctls wrapping the functions
 ***********************************/
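/* Handler for DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC: creates a channel on
 * behalf of userspace and reports back the push buffer placement and
 * the subchannel classes it should expect to use.
 */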
static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (!dev_priv->eng[NVOBJ_ENGINE_GR])
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;

	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (dev_priv->card_type < NV_C0) {
		init->subchan[0].handle = NvM2MF;
		if (dev_priv->card_type < NV_50)
			init->subchan[0].grclass = 0x0039;
		else
			init->subchan[0].grclass = 0x5039;
		init->subchan[1].handle = NvSw;
		init->subchan[1].grclass = NV_SW;
		init->nr_subchan = 2;
	} else {
		init->subchan[0].handle  = 0x9039;
		init->subchan[0].grclass = 0x9039;
		init->nr_subchan = 1;
	}

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);

	if (ret == 0)
		atomic_inc(&chan->users); /* userspace reference */
	nouveau_channel_put(&chan);
	return ret;
}
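/* Handler for DRM_IOCTL_NOUVEAU_CHANNEL_FREE: drops userspace's
 * reference on a channel; the channel itself is torn down once all
 * remaining kernel users release it.
 */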
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	list_del(&chan->list);
	atomic_dec(&chan->users);
	nouveau_channel_put(&chan);
	return 0;
}
/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);