3 * Generic driver template
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
8 * To use this template, you must at least define the following (samples
9 * given for the MGA driver):
12 * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
14 * #define DRIVER_NAME "mga"
15 * #define DRIVER_DESC "Matrox G200/G400"
16 * #define DRIVER_DATE "20001127"
18 * #define DRIVER_MAJOR 2
19 * #define DRIVER_MINOR 0
20 * #define DRIVER_PATCHLEVEL 2
22 * #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
24 * #define DRM(x) mga_##x
29 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
31 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
32 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
33 * All Rights Reserved.
35 * Permission is hereby granted, free of charge, to any person obtaining a
36 * copy of this software and associated documentation files (the "Software"),
37 * to deal in the Software without restriction, including without limitation
38 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
39 * and/or sell copies of the Software, and to permit persons to whom the
40 * Software is furnished to do so, subject to the following conditions:
42 * The above copyright notice and this permission notice (including the next
43 * paragraph) shall be included in all copies or substantial portions of the
46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
47 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
48 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
49 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
50 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
51 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
52 * OTHER DEALINGS IN THE SOFTWARE.
/*
 * Fill in default values for any optional driver feature macro the including
 * driver did not define.  A value of 0 disables the feature.  Each #ifndef
 * is closed with its own #endif so the conditionals stay balanced.
 */
#ifndef __MUST_HAVE_AGP
#define __MUST_HAVE_AGP			0
#endif
#ifndef __HAVE_CTX_BITMAP
#define __HAVE_CTX_BITMAP		0
#endif
#ifndef __HAVE_DMA_IRQ
#define __HAVE_DMA_IRQ			0
#endif
#ifndef __HAVE_DMA_QUEUE
#define __HAVE_DMA_QUEUE		0
#endif
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
#define __HAVE_MULTIPLE_DMA_QUEUES	0
#endif
#ifndef __HAVE_DMA_SCHEDULE
#define __HAVE_DMA_SCHEDULE		0
#endif
#ifndef __HAVE_DMA_FLUSH
#define __HAVE_DMA_FLUSH		0
#endif
#ifndef __HAVE_DMA_READY
#define __HAVE_DMA_READY		0
#endif
#ifndef __HAVE_DMA_QUIESCENT
#define __HAVE_DMA_QUIESCENT		0
#endif
#ifndef __HAVE_RELEASE
#define __HAVE_RELEASE			0
#endif
#ifndef __HAVE_COUNTERS
#define __HAVE_COUNTERS			0
#endif
#ifndef __HAVE_KERNEL_CTX_SWITCH
#define __HAVE_KERNEL_CTX_SWITCH	0
#endif
#ifndef __HAVE_DRIVER_FOPS_READ
#define __HAVE_DRIVER_FOPS_READ		0
#endif
#ifndef __HAVE_DRIVER_FOPS_POLL
#define __HAVE_DRIVER_FOPS_POLL		0
#endif

/*
 * Driver customization hooks: default to empty statements so a driver only
 * needs to define the hooks it actually uses.
 */
#ifndef DRIVER_PREINIT
#define DRIVER_PREINIT()
#endif
#ifndef DRIVER_POSTINIT
#define DRIVER_POSTINIT()
#endif
#ifndef DRIVER_PRERELEASE
#define DRIVER_PRERELEASE()
#endif
#ifndef DRIVER_PRETAKEDOWN
#define DRIVER_PRETAKEDOWN()
#endif
#ifndef DRIVER_POSTCLEANUP
#define DRIVER_POSTCLEANUP()
#endif
#ifndef DRIVER_PRESETUP
#define DRIVER_PRESETUP()
#endif
#ifndef DRIVER_POSTSETUP
#define DRIVER_POSTSETUP()
#endif
#ifndef DRIVER_IOCTLS
#define DRIVER_IOCTLS
#endif
/**
 * Default file_operations table for the DRM device node.  A driver
 * instantiates it by expanding DRIVER_FOPS after defining DRM(x).
 *
 * NOTE(review): the open/mmap/read/poll members are reconstructed from the
 * surrounding template; confirm against the driver's expected fops set
 * (__HAVE_DRIVER_FOPS_READ / __HAVE_DRIVER_FOPS_POLL).
 */
#define DRIVER_FOPS				\
static struct file_operations	DRM(fops) = {	\
	.owner   = THIS_MODULE,			\
	.open	 = DRM(open),			\
	.flush	 = DRM(flush),			\
	.release = DRM(release),		\
	.ioctl	 = DRM(ioctl),			\
	.mmap	 = DRM(mmap),			\
	.fasync  = DRM(fasync),			\
	.poll	 = DRM(poll),			\
	.read	 = DRM(read),			\
}
141 /** Use an additional macro to avoid preprocessor troubles */
142 #define DRM_OPTIONS_FUNC DRM(options)
144 * Called by the kernel to parse command-line options passed via the
145 * boot-loader (e.g., LILO). It calls the insmod option routine,
148 static int __init
DRM(options
)( char *str
)
150 DRM(parse_options
)( str
);
154 __setup( DRIVER_NAME
"=", DRM_OPTIONS_FUNC
);
155 #undef DRM_OPTIONS_FUNC
159 * The default number of instances (minor numbers) to initialize.
161 #ifndef DRIVER_NUM_CARDS
162 #define DRIVER_NUM_CARDS 1
165 static drm_device_t
*DRM(device
);
166 static int *DRM(minor
);
167 static int DRM(numdevs
) = 0;
172 static drm_ioctl_desc_t
DRM(ioctls
)[] = {
173 [DRM_IOCTL_NR(DRM_IOCTL_VERSION
)] = { DRM(version
), 0, 0 },
174 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE
)] = { DRM(getunique
), 0, 0 },
175 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC
)] = { DRM(getmagic
), 0, 0 },
176 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID
)] = { DRM(irq_busid
), 0, 1 },
177 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP
)] = { DRM(getmap
), 0, 0 },
178 [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT
)] = { DRM(getclient
), 0, 0 },
179 [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS
)] = { DRM(getstats
), 0, 0 },
181 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE
)] = { DRM(setunique
), 1, 1 },
182 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK
)] = { DRM(noop
), 1, 1 },
183 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK
)] = { DRM(noop
), 1, 1 },
184 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC
)] = { DRM(authmagic
), 1, 1 },
186 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP
)] = { DRM(addmap
), 1, 1 },
187 [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP
)] = { DRM(rmmap
), 1, 0 },
189 #if __HAVE_CTX_BITMAP
190 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX
)] = { DRM(setsareactx
), 1, 1 },
191 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX
)] = { DRM(getsareactx
), 1, 0 },
194 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX
)] = { DRM(addctx
), 1, 1 },
195 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX
)] = { DRM(rmctx
), 1, 1 },
196 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX
)] = { DRM(modctx
), 1, 1 },
197 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX
)] = { DRM(getctx
), 1, 0 },
198 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX
)] = { DRM(switchctx
), 1, 1 },
199 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX
)] = { DRM(newctx
), 1, 1 },
200 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX
)] = { DRM(resctx
), 1, 0 },
202 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW
)] = { DRM(adddraw
), 1, 1 },
203 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW
)] = { DRM(rmdraw
), 1, 1 },
205 [DRM_IOCTL_NR(DRM_IOCTL_LOCK
)] = { DRM(lock
), 1, 0 },
206 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK
)] = { DRM(unlock
), 1, 0 },
209 /* Gamma only, really */
210 [DRM_IOCTL_NR(DRM_IOCTL_FINISH
)] = { DRM(finish
), 1, 0 },
212 [DRM_IOCTL_NR(DRM_IOCTL_FINISH
)] = { DRM(noop
), 1, 0 },
216 [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS
)] = { DRM(addbufs
), 1, 1 },
217 [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS
)] = { DRM(markbufs
), 1, 1 },
218 [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS
)] = { DRM(infobufs
), 1, 0 },
219 [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS
)] = { DRM(mapbufs
), 1, 0 },
220 [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS
)] = { DRM(freebufs
), 1, 0 },
222 /* The DRM_IOCTL_DMA ioctl should be defined by the driver.
224 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL
)] = { DRM(control
), 1, 1 },
227 #if __REALLY_HAVE_AGP
228 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE
)] = { DRM(agp_acquire
), 1, 1 },
229 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE
)] = { DRM(agp_release
), 1, 1 },
230 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE
)] = { DRM(agp_enable
), 1, 1 },
231 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO
)] = { DRM(agp_info
), 1, 0 },
232 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC
)] = { DRM(agp_alloc
), 1, 1 },
233 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE
)] = { DRM(agp_free
), 1, 1 },
234 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND
)] = { DRM(agp_bind
), 1, 1 },
235 [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND
)] = { DRM(agp_unbind
), 1, 1 },
239 [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC
)] = { DRM(sg_alloc
), 1, 1 },
240 [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE
)] = { DRM(sg_free
), 1, 1 },
244 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK
)] = { DRM(wait_vblank
), 0, 0 },
250 #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) )
253 static char *drm_opts
= NULL
;
256 MODULE_AUTHOR( DRIVER_AUTHOR
);
257 MODULE_DESCRIPTION( DRIVER_DESC
);
258 MODULE_PARM( drm_opts
, "s" );
259 MODULE_LICENSE("GPL and additional rights");
261 static int DRM(setup
)( drm_device_t
*dev
)
266 atomic_set( &dev
->ioctl_count
, 0 );
267 atomic_set( &dev
->vma_count
, 0 );
269 atomic_set( &dev
->buf_alloc
, 0 );
272 i
= DRM(dma_setup
)( dev
);
277 dev
->counters
= 6 + __HAVE_COUNTERS
;
278 dev
->types
[0] = _DRM_STAT_LOCK
;
279 dev
->types
[1] = _DRM_STAT_OPENS
;
280 dev
->types
[2] = _DRM_STAT_CLOSES
;
281 dev
->types
[3] = _DRM_STAT_IOCTLS
;
282 dev
->types
[4] = _DRM_STAT_LOCKS
;
283 dev
->types
[5] = _DRM_STAT_UNLOCKS
;
284 #ifdef __HAVE_COUNTER6
285 dev
->types
[6] = __HAVE_COUNTER6
;
287 #ifdef __HAVE_COUNTER7
288 dev
->types
[7] = __HAVE_COUNTER7
;
290 #ifdef __HAVE_COUNTER8
291 dev
->types
[8] = __HAVE_COUNTER8
;
293 #ifdef __HAVE_COUNTER9
294 dev
->types
[9] = __HAVE_COUNTER9
;
296 #ifdef __HAVE_COUNTER10
297 dev
->types
[10] = __HAVE_COUNTER10
;
299 #ifdef __HAVE_COUNTER11
300 dev
->types
[11] = __HAVE_COUNTER11
;
302 #ifdef __HAVE_COUNTER12
303 dev
->types
[12] = __HAVE_COUNTER12
;
305 #ifdef __HAVE_COUNTER13
306 dev
->types
[13] = __HAVE_COUNTER13
;
308 #ifdef __HAVE_COUNTER14
309 dev
->types
[14] = __HAVE_COUNTER14
;
311 #ifdef __HAVE_COUNTER15
312 dev
->types
[14] = __HAVE_COUNTER14
;
315 for ( i
= 0 ; i
< DRM_ARRAY_SIZE(dev
->counts
) ; i
++ )
316 atomic_set( &dev
->counts
[i
], 0 );
318 for ( i
= 0 ; i
< DRM_HASH_SIZE
; i
++ ) {
319 dev
->magiclist
[i
].head
= NULL
;
320 dev
->magiclist
[i
].tail
= NULL
;
323 dev
->maplist
= DRM(alloc
)(sizeof(*dev
->maplist
),
325 if(dev
->maplist
== NULL
) return -ENOMEM
;
326 memset(dev
->maplist
, 0, sizeof(*dev
->maplist
));
327 INIT_LIST_HEAD(&dev
->maplist
->head
);
330 dev
->sigdata
.lock
= dev
->lock
.hw_lock
= NULL
;
331 init_waitqueue_head( &dev
->lock
.lock_queue
);
332 dev
->queue_count
= 0;
333 dev
->queue_reserved
= 0;
334 dev
->queue_slots
= 0;
335 dev
->queuelist
= NULL
;
337 dev
->context_flag
= 0;
338 dev
->interrupt_flag
= 0;
340 dev
->last_context
= 0;
341 dev
->last_switch
= 0;
342 dev
->last_checked
= 0;
343 init_waitqueue_head( &dev
->context_wait
);
348 dev
->buf_rp
= dev
->buf
;
349 dev
->buf_wp
= dev
->buf
;
350 dev
->buf_end
= dev
->buf
+ DRM_BSZ
;
351 dev
->buf_async
= NULL
;
352 init_waitqueue_head( &dev
->buf_readers
);
353 init_waitqueue_head( &dev
->buf_writers
);
358 * The kernel's context could be created here, but is now created
359 * in drm_dma_enqueue. This is more resource-efficient for
360 * hardware that does not do DMA, but may mean that
361 * drm_select_queue fails between the time the interrupt is
362 * initialized and the time the queues are initialized.
370 * Take down the DRM device.
372 * \param dev DRM device structure.
374 * Frees every resource in \p dev.
376 * \sa drm_device and setup().
378 static int DRM(takedown
)( drm_device_t
*dev
)
380 drm_magic_entry_t
*pt
, *next
;
382 drm_map_list_t
*r_list
;
383 struct list_head
*list
, *list_next
;
384 drm_vma_entry_t
*vma
, *vma_next
;
389 DRIVER_PRETAKEDOWN();
391 if ( dev
->irq
) DRM(irq_uninstall
)( dev
);
394 down( &dev
->struct_sem
);
395 del_timer( &dev
->timer
);
397 if ( dev
->devname
) {
398 DRM(free
)( dev
->devname
, strlen( dev
->devname
) + 1,
404 DRM(free
)( dev
->unique
, strlen( dev
->unique
) + 1,
410 for ( i
= 0 ; i
< DRM_HASH_SIZE
; i
++ ) {
411 for ( pt
= dev
->magiclist
[i
].head
; pt
; pt
= next
) {
413 DRM(free
)( pt
, sizeof(*pt
), DRM_MEM_MAGIC
);
415 dev
->magiclist
[i
].head
= dev
->magiclist
[i
].tail
= NULL
;
418 #if __REALLY_HAVE_AGP
419 /* Clear AGP information */
421 drm_agp_mem_t
*entry
;
422 drm_agp_mem_t
*nexte
;
424 /* Remove AGP resources, but leave dev->agp
425 intact until drv_cleanup is called. */
426 for ( entry
= dev
->agp
->memory
; entry
; entry
= nexte
) {
428 if ( entry
->bound
) DRM(unbind_agp
)( entry
->memory
);
429 DRM(free_agp
)( entry
->memory
, entry
->pages
);
430 DRM(free
)( entry
, sizeof(*entry
), DRM_MEM_AGPLISTS
);
432 dev
->agp
->memory
= NULL
;
434 if ( dev
->agp
->acquired
) DRM(agp_do_release
)();
436 dev
->agp
->acquired
= 0;
437 dev
->agp
->enabled
= 0;
441 /* Clear vma list (only built for debugging) */
442 if ( dev
->vmalist
) {
443 for ( vma
= dev
->vmalist
; vma
; vma
= vma_next
) {
444 vma_next
= vma
->next
;
445 DRM(free
)( vma
, sizeof(*vma
), DRM_MEM_VMAS
);
451 for(list
= dev
->maplist
->head
.next
;
452 list
!= &dev
->maplist
->head
;
454 list_next
= list
->next
;
455 r_list
= (drm_map_list_t
*)list
;
457 DRM(free
)(r_list
, sizeof(*r_list
), DRM_MEM_MAPS
);
460 switch ( map
->type
) {
462 case _DRM_FRAME_BUFFER
:
463 #if __REALLY_HAVE_MTRR
464 if ( map
->mtrr
>= 0 ) {
466 retcode
= mtrr_del( map
->mtrr
,
469 DRM_DEBUG( "mtrr_del=%d\n", retcode
);
472 DRM(ioremapfree
)( map
->handle
, map
->size
, dev
);
479 /* Do nothing here, because this is all
480 * handled in the AGP/GART driver.
483 case _DRM_SCATTER_GATHER
:
484 /* Handle it, but do nothing, if HAVE_SG
489 DRM(sg_cleanup
)(dev
->sg
);
495 DRM(free
)(map
, sizeof(*map
), DRM_MEM_MAPS
);
497 DRM(free
)(dev
->maplist
, sizeof(*dev
->maplist
), DRM_MEM_MAPS
);
501 #if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
502 if ( dev
->queuelist
) {
503 for ( i
= 0 ; i
< dev
->queue_count
; i
++ ) {
504 #if __HAVE_DMA_WAITLIST
505 DRM(waitlist_destroy
)( &dev
->queuelist
[i
]->waitlist
);
507 if ( dev
->queuelist
[i
] ) {
508 DRM(free
)( dev
->queuelist
[i
],
509 sizeof(*dev
->queuelist
[0]),
511 dev
->queuelist
[i
] = NULL
;
514 DRM(free
)( dev
->queuelist
,
515 dev
->queue_slots
* sizeof(*dev
->queuelist
),
517 dev
->queuelist
= NULL
;
519 dev
->queue_count
= 0;
523 DRM(dma_takedown
)( dev
);
525 if ( dev
->lock
.hw_lock
) {
526 dev
->sigdata
.lock
= dev
->lock
.hw_lock
= NULL
; /* SHM removed */
528 wake_up_interruptible( &dev
->lock
.lock_queue
);
530 up( &dev
->struct_sem
);
536 * Figure out how many instances to initialize.
538 * \return number of cards found.
540 * Searches for every PCI card in \c DRIVER_CARD_LIST with matching vendor and device ids.
542 static int drm_count_cards(void)
545 #if defined(DRIVER_CARD_LIST)
549 struct pci_dev
*pdev
= NULL
;
554 #if defined(DRIVER_COUNT_CARDS)
555 num
= DRIVER_COUNT_CARDS();
556 #elif defined(DRIVER_CARD_LIST)
557 for (i
= 0, l
= DRIVER_CARD_LIST
; l
[i
].vendor
!= 0; i
++) {
559 vendor
= l
[i
].vendor
;
560 device
= l
[i
].device
;
561 if(device
== 0xffff) device
= PCI_ANY_ID
;
562 if(vendor
== 0xffff) vendor
= PCI_ANY_ID
;
563 while ((pdev
= pci_find_device(vendor
, device
, pdev
))) {
568 num
= DRIVER_NUM_CARDS
;
570 DRM_DEBUG("numdevs = %d\n", num
);
575 * Module initialization. Called via init_module at module load time, or via
576 * linux/init/main.c (this is not currently supported).
578 * \return zero on success or a negative number on failure.
580 * Allocates and initialize an array of drm_device structures, and attempts to
581 * initialize all available devices, using consecutive minors, registering the
582 * stubs and initializing the AGP device.
584 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
585 * after the initialization for driver customization.
587 static int __init
drm_init( void )
592 #if __HAVE_CTX_BITMAP
598 DRM(parse_options
)( drm_opts
);
601 DRM(numdevs
) = drm_count_cards();
602 /* Force at least one instance. */
603 if (DRM(numdevs
) <= 0)
606 DRM(device
) = kmalloc(sizeof(*DRM(device
)) * DRM(numdevs
), GFP_KERNEL
);
610 DRM(minor
) = kmalloc(sizeof(*DRM(minor
)) * DRM(numdevs
), GFP_KERNEL
);
620 for (i
= 0; i
< DRM(numdevs
); i
++) {
621 dev
= &(DRM(device
)[i
]);
622 memset( (void *)dev
, 0, sizeof(*dev
) );
623 dev
->count_lock
= SPIN_LOCK_UNLOCKED
;
624 init_timer( &dev
->timer
);
625 sema_init( &dev
->struct_sem
, 1 );
627 if ((DRM(minor
)[i
] = DRM(stub_register
)(DRIVER_NAME
, &DRM(fops
),dev
)) < 0)
629 dev
->device
= MKDEV(DRM_MAJOR
, DRM(minor
)[i
] );
630 dev
->name
= DRIVER_NAME
;
632 #if __REALLY_HAVE_AGP
633 dev
->agp
= DRM(agp_init
)();
635 if ( dev
->agp
== NULL
) {
636 DRM_ERROR( "Cannot initialize the agpgart module.\n" );
637 DRM(stub_unregister
)(DRM(minor
)[i
]);
638 DRM(takedown
)( dev
);
642 #if __REALLY_HAVE_MTRR
644 dev
->agp
->agp_mtrr
= mtrr_add( dev
->agp
->agp_info
.aper_base
,
645 dev
->agp
->agp_info
.aper_size
*1024*1024,
651 #if __HAVE_CTX_BITMAP
652 retcode
= DRM(ctxbitmap_init
)( dev
);
654 DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
655 DRM(stub_unregister
)(DRM(minor
)[i
]);
656 DRM(takedown
)( dev
);
660 DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
675 * Called via cleanup_module() at module unload time.
677 * Cleans up all DRM device, calling takedown().
681 static void __exit
drm_cleanup( void )
688 for (i
= DRM(numdevs
) - 1; i
>= 0; i
--) {
689 dev
= &(DRM(device
)[i
]);
690 if ( DRM(stub_unregister
)(DRM(minor
)[i
]) ) {
691 DRM_ERROR( "Cannot unload module\n" );
693 DRM_DEBUG("minor %d unregistered\n", DRM(minor
)[i
]);
695 DRM_INFO( "Module unloaded\n" );
698 #if __HAVE_CTX_BITMAP
699 DRM(ctxbitmap_cleanup
)( dev
);
702 #if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
703 if ( dev
->agp
&& dev
->agp
->agp_mtrr
>= 0) {
705 retval
= mtrr_del( dev
->agp
->agp_mtrr
,
706 dev
->agp
->agp_info
.aper_base
,
707 dev
->agp
->agp_info
.aper_size
*1024*1024 );
708 DRM_DEBUG( "mtrr_del=%d\n", retval
);
712 DRM(takedown
)( dev
);
714 #if __REALLY_HAVE_AGP
717 DRM(free
)( dev
->agp
, sizeof(*dev
->agp
), DRM_MEM_AGPLISTS
);
722 DRIVER_POSTCLEANUP();
728 module_init( drm_init
);
729 module_exit( drm_cleanup
);
733 * Get version information
735 * \param inode device inode.
736 * \param filp file pointer.
737 * \param cmd command.
738 * \param arg user argument, pointing to a drm_version structure.
739 * \return zero on success or negative number on failure.
741 * Fills in the version information in \p arg.
743 int DRM(version
)( struct inode
*inode
, struct file
*filp
,
744 unsigned int cmd
, unsigned long arg
)
746 drm_version_t version
;
749 if ( copy_from_user( &version
,
750 (drm_version_t
*)arg
,
754 #define DRM_COPY( name, value ) \
755 len = strlen( value ); \
756 if ( len > name##_len ) len = name##_len; \
757 name##_len = strlen( value ); \
758 if ( len && name ) { \
759 if ( copy_to_user( name, value, len ) ) \
763 version
.version_major
= DRIVER_MAJOR
;
764 version
.version_minor
= DRIVER_MINOR
;
765 version
.version_patchlevel
= DRIVER_PATCHLEVEL
;
767 DRM_COPY( version
.name
, DRIVER_NAME
);
768 DRM_COPY( version
.date
, DRIVER_DATE
);
769 DRM_COPY( version
.desc
, DRIVER_DESC
);
771 if ( copy_to_user( (drm_version_t
*)arg
,
781 * \param inode device inode
782 * \param filp file pointer.
783 * \return zero on success or a negative number on failure.
785 * Searches the DRM device with the same minor number, calls open_helper(), and
786 * increments the device open count. If the open count was previous at zero,
787 * i.e., it's the first that the device is open, then calls setup().
789 int DRM(open
)( struct inode
*inode
, struct file
*filp
)
791 drm_device_t
*dev
= NULL
;
795 for (i
= 0; i
< DRM(numdevs
); i
++) {
796 if (minor(inode
->i_rdev
) == DRM(minor
)[i
]) {
797 dev
= &(DRM(device
)[i
]);
805 retcode
= DRM(open_helper
)( inode
, filp
, dev
);
807 atomic_inc( &dev
->counts
[_DRM_STAT_OPENS
] );
808 spin_lock( &dev
->count_lock
);
809 if ( !dev
->open_count
++ ) {
810 spin_unlock( &dev
->count_lock
);
811 return DRM(setup
)( dev
);
813 spin_unlock( &dev
->count_lock
);
822 * \param inode device inode
823 * \param filp file pointer.
824 * \return zero on success or a negative number on failure.
826 * If the hardware lock is held then free it, and take it again for the kernel
827 * context since it's necessary to reclaim buffers. Unlink the file private
828 * data from its list and free it. Decreases the open count and if it reaches
829 * zero calls takedown().
831 int DRM(release
)( struct inode
*inode
, struct file
*filp
)
833 drm_file_t
*priv
= filp
->private_data
;
840 DRM_DEBUG( "open_count = %d\n", dev
->open_count
);
844 /* ========================================================
845 * Begin inline drm_release
848 DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
849 current
->pid
, (long)dev
->device
, dev
->open_count
);
851 if ( priv
->lock_count
&& dev
->lock
.hw_lock
&&
852 _DRM_LOCK_IS_HELD(dev
->lock
.hw_lock
->lock
) &&
853 dev
->lock
.filp
== filp
) {
854 DRM_DEBUG( "File %p released, freeing lock for context %d\n",
856 _DRM_LOCKING_CONTEXT(dev
->lock
.hw_lock
->lock
) );
860 DRM(lock_free
)( dev
, &dev
->lock
.hw_lock
->lock
,
861 _DRM_LOCKING_CONTEXT(dev
->lock
.hw_lock
->lock
) );
863 /* FIXME: may require heavy-handed reset of
864 hardware at this point, possibly
865 processed via a callback to the X
869 else if ( priv
->lock_count
&& dev
->lock
.hw_lock
) {
870 /* The lock is required to reclaim buffers */
871 DECLARE_WAITQUEUE( entry
, current
);
873 add_wait_queue( &dev
->lock
.lock_queue
, &entry
);
875 current
->state
= TASK_INTERRUPTIBLE
;
876 if ( !dev
->lock
.hw_lock
) {
877 /* Device has been unregistered */
881 if ( DRM(lock_take
)( &dev
->lock
.hw_lock
->lock
,
882 DRM_KERNEL_CONTEXT
) ) {
883 dev
->lock
.filp
= filp
;
884 dev
->lock
.lock_time
= jiffies
;
885 atomic_inc( &dev
->counts
[_DRM_STAT_LOCKS
] );
886 break; /* Got lock */
890 if ( signal_pending( current
) ) {
891 retcode
= -ERESTARTSYS
;
895 current
->state
= TASK_RUNNING
;
896 remove_wait_queue( &dev
->lock
.lock_queue
, &entry
);
899 DRM(lock_free
)( dev
, &dev
->lock
.hw_lock
->lock
,
900 DRM_KERNEL_CONTEXT
);
904 DRM(reclaim_buffers
)( filp
);
907 DRM(fasync
)( -1, filp
, 0 );
909 down( &dev
->struct_sem
);
910 if ( priv
->remove_auth_on_close
== 1 ) {
911 drm_file_t
*temp
= dev
->file_first
;
913 temp
->authenticated
= 0;
918 priv
->prev
->next
= priv
->next
;
920 dev
->file_first
= priv
->next
;
923 priv
->next
->prev
= priv
->prev
;
925 dev
->file_last
= priv
->prev
;
927 up( &dev
->struct_sem
);
929 DRM(free
)( priv
, sizeof(*priv
), DRM_MEM_FILES
);
931 /* ========================================================
932 * End inline drm_release
935 atomic_inc( &dev
->counts
[_DRM_STAT_CLOSES
] );
936 spin_lock( &dev
->count_lock
);
937 if ( !--dev
->open_count
) {
938 if ( atomic_read( &dev
->ioctl_count
) || dev
->blocked
) {
939 DRM_ERROR( "Device busy: %d %d\n",
940 atomic_read( &dev
->ioctl_count
),
942 spin_unlock( &dev
->count_lock
);
946 spin_unlock( &dev
->count_lock
);
948 return DRM(takedown
)( dev
);
950 spin_unlock( &dev
->count_lock
);
958 * Called whenever a process performs an ioctl on /dev/drm.
960 * \param inode device inode.
961 * \param filp file pointer.
962 * \param cmd command.
963 * \param arg user argument.
964 * \return zero on success or negative number on failure.
966 * Looks up the ioctl function in the ::ioctls table, checking for root
967 * previleges if so required, and dispatches to the respective function.
969 int DRM(ioctl
)( struct inode
*inode
, struct file
*filp
,
970 unsigned int cmd
, unsigned long arg
)
972 drm_file_t
*priv
= filp
->private_data
;
973 drm_device_t
*dev
= priv
->dev
;
974 drm_ioctl_desc_t
*ioctl
;
976 int nr
= DRM_IOCTL_NR(cmd
);
979 atomic_inc( &dev
->ioctl_count
);
980 atomic_inc( &dev
->counts
[_DRM_STAT_IOCTLS
] );
983 DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
984 current
->pid
, cmd
, nr
, (long)dev
->device
,
985 priv
->authenticated
);
987 if ( nr
>= DRIVER_IOCTL_COUNT
) {
990 ioctl
= &DRM(ioctls
)[nr
];
994 DRM_DEBUG( "no function\n" );
996 } else if ( ( ioctl
->root_only
&& !capable( CAP_SYS_ADMIN
) )||
997 ( ioctl
->auth_needed
&& !priv
->authenticated
) ) {
1000 retcode
= func( inode
, filp
, cmd
, arg
);
1004 atomic_dec( &dev
->ioctl_count
);
1011 * \param inode device inode.
1012 * \param filp file pointer.
1013 * \param cmd command.
1014 * \param arg user argument, pointing to a drm_lock structure.
1015 * \return zero on success or negative number on failure.
1017 * Add the current task to the lock wait queue, and attempt to take to lock.
1019 int DRM(lock
)( struct inode
*inode
, struct file
*filp
,
1020 unsigned int cmd
, unsigned long arg
)
1022 drm_file_t
*priv
= filp
->private_data
;
1023 drm_device_t
*dev
= priv
->dev
;
1024 DECLARE_WAITQUEUE( entry
, current
);
1027 #if __HAVE_MULTIPLE_DMA_QUEUES
1033 if ( copy_from_user( &lock
, (drm_lock_t
*)arg
, sizeof(lock
) ) )
1036 if ( lock
.context
== DRM_KERNEL_CONTEXT
) {
1037 DRM_ERROR( "Process %d using kernel context %d\n",
1038 current
->pid
, lock
.context
);
1042 DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
1043 lock
.context
, current
->pid
,
1044 dev
->lock
.hw_lock
->lock
, lock
.flags
);
1046 #if __HAVE_DMA_QUEUE
1047 if ( lock
.context
< 0 )
1049 #elif __HAVE_MULTIPLE_DMA_QUEUES
1050 if ( lock
.context
< 0 || lock
.context
>= dev
->queue_count
)
1052 q
= dev
->queuelist
[lock
.context
];
1055 #if __HAVE_DMA_FLUSH
1056 ret
= DRM(flush_block_and_flush
)( dev
, lock
.context
, lock
.flags
);
1059 add_wait_queue( &dev
->lock
.lock_queue
, &entry
);
1061 current
->state
= TASK_INTERRUPTIBLE
;
1062 if ( !dev
->lock
.hw_lock
) {
1063 /* Device has been unregistered */
1067 if ( DRM(lock_take
)( &dev
->lock
.hw_lock
->lock
,
1069 dev
->lock
.filp
= filp
;
1070 dev
->lock
.lock_time
= jiffies
;
1071 atomic_inc( &dev
->counts
[_DRM_STAT_LOCKS
] );
1072 break; /* Got lock */
1077 if ( signal_pending( current
) ) {
1082 current
->state
= TASK_RUNNING
;
1083 remove_wait_queue( &dev
->lock
.lock_queue
, &entry
);
1086 #if __HAVE_DMA_FLUSH
1087 DRM(flush_unblock
)( dev
, lock
.context
, lock
.flags
); /* cleanup phase */
1091 sigemptyset( &dev
->sigmask
);
1092 sigaddset( &dev
->sigmask
, SIGSTOP
);
1093 sigaddset( &dev
->sigmask
, SIGTSTP
);
1094 sigaddset( &dev
->sigmask
, SIGTTIN
);
1095 sigaddset( &dev
->sigmask
, SIGTTOU
);
1096 dev
->sigdata
.context
= lock
.context
;
1097 dev
->sigdata
.lock
= dev
->lock
.hw_lock
;
1098 block_all_signals( DRM(notifier
),
1099 &dev
->sigdata
, &dev
->sigmask
);
1101 #if __HAVE_DMA_READY
1102 if ( lock
.flags
& _DRM_LOCK_READY
) {
1106 #if __HAVE_DMA_QUIESCENT
1107 if ( lock
.flags
& _DRM_LOCK_QUIESCENT
) {
1108 DRIVER_DMA_QUIESCENT();
1111 #if __HAVE_KERNEL_CTX_SWITCH
1112 if ( dev
->last_context
!= lock
.context
) {
1113 DRM(context_switch
)(dev
, dev
->last_context
,
1119 DRM_DEBUG( "%d %s\n", lock
.context
, ret
? "interrupted" : "has lock" );
1127 * \param inode device inode.
1128 * \param filp file pointer.
1129 * \param cmd command.
1130 * \param arg user argument, pointing to a drm_lock structure.
1131 * \return zero on success or negative number on failure.
1133 * Transfer and free the lock.
1135 int DRM(unlock
)( struct inode
*inode
, struct file
*filp
,
1136 unsigned int cmd
, unsigned long arg
)
1138 drm_file_t
*priv
= filp
->private_data
;
1139 drm_device_t
*dev
= priv
->dev
;
1142 if ( copy_from_user( &lock
, (drm_lock_t
*)arg
, sizeof(lock
) ) )
1145 if ( lock
.context
== DRM_KERNEL_CONTEXT
) {
1146 DRM_ERROR( "Process %d using kernel context %d\n",
1147 current
->pid
, lock
.context
);
1151 atomic_inc( &dev
->counts
[_DRM_STAT_UNLOCKS
] );
1153 #if __HAVE_KERNEL_CTX_SWITCH
1154 /* We no longer really hold it, but if we are the next
1155 * agent to request it then we should just be able to
1156 * take it immediately and not eat the ioctl.
1160 __volatile__
unsigned int *plock
= &dev
->lock
.hw_lock
->lock
;
1161 unsigned int old
, new, prev
, ctx
;
1167 prev
= cmpxchg(plock
, old
, new);
1168 } while (prev
!= old
);
1170 wake_up_interruptible(&dev
->lock
.lock_queue
);
1172 DRM(lock_transfer
)( dev
, &dev
->lock
.hw_lock
->lock
,
1173 DRM_KERNEL_CONTEXT
);
1174 #if __HAVE_DMA_SCHEDULE
1175 DRM(dma_schedule
)( dev
, 1 );
1178 /* FIXME: Do we ever really need to check this???
1180 if ( 1 /* !dev->context_flag */ ) {
1181 if ( DRM(lock_free
)( dev
, &dev
->lock
.hw_lock
->lock
,
1182 DRM_KERNEL_CONTEXT
) ) {
1186 #endif /* !__HAVE_KERNEL_CTX_SWITCH */
1188 unblock_all_signals();