/*
 * Provenance: sys/dev/drm/drm_dma.h from the DragonFly vkernel-mp repository
 * (blob d31ad5c622b9330175d1a500613260d1e2ccd6ac).
 */
/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $FreeBSD: src/sys/dev/drm/drm_dma.h,v 1.5.2.1 2003/04/26 07:05:28 anholt Exp $
 * $DragonFly: src/sys/dev/drm/drm_dma.h,v 1.8 2006/09/03 20:29:15 dillon Exp $
 */

#include "dev/drm/drmP.h"
37 #ifndef __HAVE_DMA_WAITQUEUE
38 #define __HAVE_DMA_WAITQUEUE 0
39 #endif
40 #ifndef __HAVE_DMA_RECLAIM
41 #define __HAVE_DMA_RECLAIM 0
42 #endif
43 #ifndef __HAVE_SHARED_IRQ
44 #define __HAVE_SHARED_IRQ 0
45 #endif
47 #if __HAVE_DMA
49 int DRM(dma_setup)( drm_device_t *dev )
51 int i;
53 dev->dma = DRM(alloc)( sizeof(*dev->dma), DRM_MEM_DRIVER );
54 if ( !dev->dma )
55 return DRM_ERR(ENOMEM);
57 memset( dev->dma, 0, sizeof(*dev->dma) );
59 for ( i = 0 ; i <= DRM_MAX_ORDER ; i++ )
60 memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
62 return 0;
65 void DRM(dma_takedown)(drm_device_t *dev)
67 drm_device_dma_t *dma = dev->dma;
68 int i, j;
70 if (!dma) return;
72 /* Clear dma buffers */
73 for (i = 0; i <= DRM_MAX_ORDER; i++) {
74 if (dma->bufs[i].seg_count) {
75 DRM_DEBUG("order %d: buf_count = %d,"
76 " seg_count = %d\n",
78 dma->bufs[i].buf_count,
79 dma->bufs[i].seg_count);
80 for (j = 0; j < dma->bufs[i].seg_count; j++) {
81 DRM(free)((void *)dma->bufs[i].seglist[j],
82 dma->bufs[i].buf_size,
83 DRM_MEM_DMA);
85 DRM(free)(dma->bufs[i].seglist,
86 dma->bufs[i].seg_count
87 * sizeof(*dma->bufs[0].seglist),
88 DRM_MEM_SEGS);
90 if(dma->bufs[i].buf_count) {
91 for(j = 0; j < dma->bufs[i].buf_count; j++) {
92 if(dma->bufs[i].buflist[j].dev_private) {
93 DRM(free)(dma->bufs[i].buflist[j].dev_private,
94 dma->bufs[i].buflist[j].dev_priv_size,
95 DRM_MEM_BUFS);
98 DRM(free)(dma->bufs[i].buflist,
99 dma->bufs[i].buf_count *
100 sizeof(*dma->bufs[0].buflist),
101 DRM_MEM_BUFS);
105 if (dma->buflist) {
106 DRM(free)(dma->buflist,
107 dma->buf_count * sizeof(*dma->buflist),
108 DRM_MEM_BUFS);
111 if (dma->pagelist) {
112 DRM(free)(dma->pagelist,
113 dma->page_count * sizeof(*dma->pagelist),
114 DRM_MEM_PAGES);
116 DRM(free)(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
117 dev->dma = NULL;
121 void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
123 if (!buf) return;
125 buf->pending = 0;
126 buf->filp = NULL;
127 buf->used = 0;
130 #if !__HAVE_DMA_RECLAIM
131 void DRM(reclaim_buffers)(drm_device_t *dev, DRMFILE filp)
133 drm_device_dma_t *dma = dev->dma;
134 int i;
136 if (!dma) return;
137 for (i = 0; i < dma->buf_count; i++) {
138 if (dma->buflist[i]->filp == filp) {
139 switch (dma->buflist[i]->list) {
140 case DRM_LIST_NONE:
141 DRM(free_buffer)(dev, dma->buflist[i]);
142 break;
143 case DRM_LIST_WAIT:
144 dma->buflist[i]->list = DRM_LIST_RECLAIM;
145 break;
146 default:
147 /* Buffer already on hardware. */
148 break;
153 #endif
156 #if __HAVE_DMA_IRQ
158 int DRM(irq_install)( drm_device_t *dev, int irq )
160 int retcode;
162 if ( !irq )
163 return DRM_ERR(EINVAL);
165 DRM_LOCK;
166 if ( dev->irq ) {
167 DRM_UNLOCK;
168 return DRM_ERR(EBUSY);
170 dev->irq = irq;
171 DRM_UNLOCK;
173 DRM_DEBUG( "%s: irq=%d\n", __func__, irq );
175 dev->context_flag = 0;
177 dev->dma->next_buffer = NULL;
178 dev->dma->this_buffer = NULL;
180 #if __HAVE_DMA_IRQ_BH
181 TASK_INIT(&dev->task, 0, DRM(dma_immediate_bh), dev);
182 #endif
184 #if __HAVE_VBL_IRQ && 0 /* disabled */
185 DRM_SPININIT( dev->vbl_lock, "vblsig" );
186 TAILQ_INIT( &dev->vbl_sig_list );
187 #endif
189 /* Before installing handler */
190 DRM(driver_irq_preinstall)( dev );
192 /* Install handler */
193 dev->irqrid = 0;
194 #if defined(__DragonFly__) || defined(__FreeBSD__)
195 dev->irqr = bus_alloc_resource(dev->device, SYS_RES_IRQ, &dev->irqrid,
196 0, ~0, 1, RF_SHAREABLE);
197 if (!dev->irqr) {
198 #elif defined(__NetBSD__)
199 if (pci_intr_map(&dev->pa, &dev->ih) != 0) {
200 #endif
201 DRM_LOCK;
202 dev->irq = 0;
203 dev->irqrid = 0;
204 DRM_UNLOCK;
205 return ENOENT;
208 #if defined(__DragonFly__) || defined(__FreeBSD__)
209 #if defined(__DragonFly__) || __FreeBSD_version < 500000
210 retcode = bus_setup_intr(dev->device, dev->irqr, 0,
211 DRM(dma_service), dev, &dev->irqh, NULL);
212 #else
213 retcode = bus_setup_intr(dev->device, dev->irqr, INTR_MPSAFE,
214 DRM(dma_service), dev, &dev->irqh, NULL);
215 #endif
216 if ( retcode ) {
217 #elif defined(__NetBSD__)
218 dev->irqh = pci_intr_establish(&dev->pa.pa_pc, dev->ih, IPL_TTY,
219 (int (*)(DRM_IRQ_ARGS))DRM(dma_service), dev);
220 if ( !dev->irqh ) {
221 #endif
222 DRM_LOCK;
223 #if defined(__DragonFly__) || defined(__FreeBSD__)
224 bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid, dev->irqr);
225 #endif
226 dev->irq = 0;
227 dev->irqrid = 0;
228 DRM_UNLOCK;
229 return retcode;
232 /* After installing handler */
233 DRM(driver_irq_postinstall)( dev );
235 return 0;
238 int DRM(irq_uninstall)( drm_device_t *dev )
240 int irq;
241 int irqrid;
243 DRM_LOCK;
244 irq = dev->irq;
245 irqrid = dev->irqrid;
246 dev->irq = 0;
247 dev->irqrid = 0;
248 DRM_UNLOCK;
250 if ( !irq )
251 return DRM_ERR(EINVAL);
253 DRM_DEBUG( "%s: irq=%d\n", __func__, irq );
255 DRM(driver_irq_uninstall)( dev );
257 #if defined(__DragonFly__) || defined(__FreeBSD__)
258 bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
259 bus_release_resource(dev->device, SYS_RES_IRQ, irqrid, dev->irqr);
260 #elif defined(__NetBSD__)
261 pci_intr_disestablish(&dev->pa.pa_pc, dev->irqh);
262 #endif
264 return 0;
267 int DRM(control)( DRM_IOCTL_ARGS )
269 DRM_DEVICE;
270 drm_control_t ctl;
272 DRM_COPY_FROM_USER_IOCTL( ctl, (drm_control_t *) data, sizeof(ctl) );
274 switch ( ctl.func ) {
275 case DRM_INST_HANDLER:
276 return DRM(irq_install)( dev, ctl.irq );
277 case DRM_UNINST_HANDLER:
278 return DRM(irq_uninstall)( dev );
279 default:
280 return DRM_ERR(EINVAL);
284 #if __HAVE_VBL_IRQ
285 int DRM(wait_vblank)( DRM_IOCTL_ARGS )
287 DRM_DEVICE;
288 drm_wait_vblank_t vblwait;
289 struct timeval now;
290 int ret;
292 if (!dev->irq)
293 return DRM_ERR(EINVAL);
295 DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
296 sizeof(vblwait) );
298 if (vblwait.request.type & _DRM_VBLANK_RELATIVE) {
299 vblwait.request.sequence += atomic_read(&dev->vbl_received);
300 vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
303 flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
304 if (flags & _DRM_VBLANK_SIGNAL) {
305 #if 0 /* disabled */
306 drm_vbl_sig_t *vbl_sig = DRM_MALLOC(sizeof(drm_vbl_sig_t));
307 if (vbl_sig == NULL)
308 return ENOMEM;
309 bzero(vbl_sig, sizeof(*vbl_sig));
311 vbl_sig->sequence = vblwait.request.sequence;
312 vbl_sig->signo = vblwait.request.signal;
313 vbl_sig->pid = DRM_CURRENTPID;
315 vblwait.reply.sequence = atomic_read(&dev->vbl_received);
317 DRM_SPINLOCK(&dev->vbl_lock);
318 TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
319 DRM_SPINUNLOCK(&dev->vbl_lock);
320 ret = 0;
321 #endif
322 ret = EINVAL;
323 } else {
324 ret = DRM(vblank_wait)(dev, &vblwait.request.sequence);
326 microtime(&now);
327 vblwait.reply.tval_sec = now.tv_sec;
328 vblwait.reply.tval_usec = now.tv_usec;
331 DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
332 sizeof(vblwait) );
334 return ret;
337 void DRM(vbl_send_signals)(drm_device_t *dev)
341 #if 0 /* disabled */
342 void DRM(vbl_send_signals)( drm_device_t *dev )
344 drm_vbl_sig_t *vbl_sig;
345 unsigned int vbl_seq = atomic_read( &dev->vbl_received );
346 struct proc *p;
348 DRM_SPINLOCK(&dev->vbl_lock);
350 loop:
351 vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
352 while (vbl_sig != NULL) {
353 drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);
355 if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
356 p = pfind(vbl_sig->pid);
357 if (p != NULL)
358 ksignal(p, vbl_sig->signo);
360 TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
361 DRM_SPINUNLOCK(&dev->vbl_lock);
362 DRM_FREE(vbl_sig,sizeof(*vbl_sig));
363 goto loop;
365 vbl_sig = next;
368 DRM_SPINUNLOCK(&dev->vbl_lock);
370 #endif
372 #endif /* __HAVE_VBL_IRQ */
374 #else
376 int DRM(control)( DRM_IOCTL_ARGS )
378 drm_control_t ctl;
380 DRM_COPY_FROM_USER_IOCTL( ctl, (drm_control_t *) data, sizeof(ctl) );
382 switch ( ctl.func ) {
383 case DRM_INST_HANDLER:
384 case DRM_UNINST_HANDLER:
385 return 0;
386 default:
387 return DRM_ERR(EINVAL);
391 #endif /* __HAVE_DMA_IRQ */
393 #endif /* __HAVE_DMA */