Merge comment updates from DRI CVS tree.
[linux-2.6/history.git] / drivers / char / drm / drm_drv.h
blob b39d663e97df9997f23c0c5e3137f6cb66268e0c
1 /**
2 * \file drm_drv.h
3 * Generic driver template
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
8 * To use this template, you must at least define the following (samples
9 * given for the MGA driver):
11 * \code
12 * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
14 * #define DRIVER_NAME "mga"
15 * #define DRIVER_DESC "Matrox G200/G400"
16 * #define DRIVER_DATE "20001127"
18 * #define DRIVER_MAJOR 2
19 * #define DRIVER_MINOR 0
20 * #define DRIVER_PATCHLEVEL 2
22 * #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
24 * #define DRM(x) mga_##x
25 * \endcode
29 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
31 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
32 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
33 * All Rights Reserved.
35 * Permission is hereby granted, free of charge, to any person obtaining a
36 * copy of this software and associated documentation files (the "Software"),
37 * to deal in the Software without restriction, including without limitation
38 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
39 * and/or sell copies of the Software, and to permit persons to whom the
40 * Software is furnished to do so, subject to the following conditions:
42 * The above copyright notice and this permission notice (including the next
43 * paragraph) shall be included in all copies or substantial portions of the
44 * Software.
46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
47 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
48 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
49 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
50 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
51 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
52 * OTHER DEALINGS IN THE SOFTWARE.
/*
 * Defaults for the driver-customization macros.  A driver using this
 * template may define any of the __HAVE_* feature flags and DRIVER_*
 * hook macros before including this file; anything left undefined is
 * given a benign default here (0 for feature flags, an empty expansion
 * for hooks, and a default file_operations table for DRIVER_FOPS).
 *
 * NOTE(review): this copy appears to have lost physical lines during
 * extraction (e.g. the closing "};" of the DRIVER_FOPS initializer and
 * the "};" terminating the ioctl table are missing) -- compare against
 * the pristine drm_drv.h before building.
 */
55 #ifndef __MUST_HAVE_AGP
56 #define __MUST_HAVE_AGP 0
57 #endif
58 #ifndef __HAVE_CTX_BITMAP
59 #define __HAVE_CTX_BITMAP 0
60 #endif
61 #ifndef __HAVE_DMA_IRQ
62 #define __HAVE_DMA_IRQ 0
63 #endif
64 #ifndef __HAVE_DMA_QUEUE
65 #define __HAVE_DMA_QUEUE 0
66 #endif
67 #ifndef __HAVE_MULTIPLE_DMA_QUEUES
68 #define __HAVE_MULTIPLE_DMA_QUEUES 0
69 #endif
70 #ifndef __HAVE_DMA_SCHEDULE
71 #define __HAVE_DMA_SCHEDULE 0
72 #endif
73 #ifndef __HAVE_DMA_FLUSH
74 #define __HAVE_DMA_FLUSH 0
75 #endif
76 #ifndef __HAVE_DMA_READY
77 #define __HAVE_DMA_READY 0
78 #endif
79 #ifndef __HAVE_DMA_QUIESCENT
80 #define __HAVE_DMA_QUIESCENT 0
81 #endif
82 #ifndef __HAVE_RELEASE
83 #define __HAVE_RELEASE 0
84 #endif
85 #ifndef __HAVE_COUNTERS
86 #define __HAVE_COUNTERS 0
87 #endif
88 #ifndef __HAVE_SG
89 #define __HAVE_SG 0
90 #endif
91 #ifndef __HAVE_KERNEL_CTX_SWITCH
92 #define __HAVE_KERNEL_CTX_SWITCH 0
93 #endif
94 #ifndef __HAVE_DRIVER_FOPS_READ
95 #define __HAVE_DRIVER_FOPS_READ 0
96 #endif
97 #ifndef __HAVE_DRIVER_FOPS_POLL
98 #define __HAVE_DRIVER_FOPS_POLL 0
99 #endif
/* Per-driver hooks: each expands to nothing unless the driver defines it. */
101 #ifndef DRIVER_PREINIT
102 #define DRIVER_PREINIT()
103 #endif
104 #ifndef DRIVER_POSTINIT
105 #define DRIVER_POSTINIT()
106 #endif
107 #ifndef DRIVER_PRERELEASE
108 #define DRIVER_PRERELEASE()
109 #endif
110 #ifndef DRIVER_PRETAKEDOWN
111 #define DRIVER_PRETAKEDOWN()
112 #endif
113 #ifndef DRIVER_POSTCLEANUP
114 #define DRIVER_POSTCLEANUP()
115 #endif
116 #ifndef DRIVER_PRESETUP
117 #define DRIVER_PRESETUP()
118 #endif
119 #ifndef DRIVER_POSTSETUP
120 #define DRIVER_POSTSETUP()
121 #endif
122 #ifndef DRIVER_IOCTLS
123 #define DRIVER_IOCTLS
124 #endif
/* Default file_operations table, wiring every fop to the DRM() helpers. */
125 #ifndef DRIVER_FOPS
126 #define DRIVER_FOPS \
127 static struct file_operations DRM(fops) = { \
128 .owner = THIS_MODULE, \
129 .open = DRM(open), \
130 .flush = DRM(flush), \
131 .release = DRM(release), \
132 .ioctl = DRM(ioctl), \
133 .mmap = DRM(mmap), \
134 .fasync = DRM(fasync), \
135 .poll = DRM(poll), \
136 .read = DRM(read), \
138 #endif
/* Built-in (non-module) kernels parse "DRIVER_NAME=" boot options here. */
140 #ifndef MODULE
141 /** Use an additional macro to avoid preprocessor troubles */
142 #define DRM_OPTIONS_FUNC DRM(options)
144 * Called by the kernel to parse command-line options passed via the
145 * boot-loader (e.g., LILO). It calls the insmod option routine,
146 * parse_options().
148 static int __init DRM(options)( char *str )
150 DRM(parse_options)( str );
151 return 1;
154 __setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
155 #undef DRM_OPTIONS_FUNC
156 #endif
159 * The default number of instances (minor numbers) to initialize.
161 #ifndef DRIVER_NUM_CARDS
162 #define DRIVER_NUM_CARDS 1
163 #endif
/* Per-template globals: device array, minor array, and device count. */
165 static drm_device_t *DRM(device);
166 static int *DRM(minor);
167 static int DRM(numdevs) = 0;
169 DRIVER_FOPS;
/* Designated-initializer ioctl dispatch table; trailing fields are
 * auth_needed and root_only flags checked by DRM(ioctl). */
171 /** Ioctl table */
172 static drm_ioctl_desc_t DRM(ioctls)[] = {
173 [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
174 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
175 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
176 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { DRM(irq_busid), 0, 1 },
177 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { DRM(getmap), 0, 0 },
178 [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { DRM(getclient), 0, 0 },
179 [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
181 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
182 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(noop), 1, 1 },
183 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(noop), 1, 1 },
184 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
186 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
187 [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },
189 #if __HAVE_CTX_BITMAP
190 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
191 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
192 #endif
194 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
195 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
196 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { DRM(modctx), 1, 1 },
197 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { DRM(getctx), 1, 0 },
198 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { DRM(switchctx), 1, 1 },
199 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { DRM(newctx), 1, 1 },
200 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { DRM(resctx), 1, 0 },
202 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { DRM(adddraw), 1, 1 },
203 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { DRM(rmdraw), 1, 1 },
205 [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
206 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
208 #if __HAVE_DMA_FLUSH
209 /* Gamma only, really */
210 [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
211 #else
212 [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 },
213 #endif
215 #if __HAVE_DMA
216 [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
217 [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { DRM(markbufs), 1, 1 },
218 [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { DRM(infobufs), 1, 0 },
219 [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { DRM(mapbufs), 1, 0 },
220 [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { DRM(freebufs), 1, 0 },
222 /* The DRM_IOCTL_DMA ioctl should be defined by the driver.
224 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 },
225 #endif
227 #if __REALLY_HAVE_AGP
228 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 },
229 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { DRM(agp_release), 1, 1 },
230 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { DRM(agp_enable), 1, 1 },
231 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { DRM(agp_info), 1, 0 },
232 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { DRM(agp_alloc), 1, 1 },
233 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { DRM(agp_free), 1, 1 },
234 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { DRM(agp_bind), 1, 1 },
235 [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { DRM(agp_unbind), 1, 1 },
236 #endif
238 #if __HAVE_SG
239 [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { DRM(sg_alloc), 1, 1 },
240 [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 },
241 #endif
243 #if __HAVE_VBL_IRQ
244 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 },
245 #endif
247 DRIVER_IOCTLS
250 #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) )
/* Module parameters: "drm_opts" carries the insmod option string. */
252 #ifdef MODULE
253 static char *drm_opts = NULL;
254 #endif
256 MODULE_AUTHOR( DRIVER_AUTHOR );
257 MODULE_DESCRIPTION( DRIVER_DESC );
258 MODULE_PARM( drm_opts, "s" );
259 MODULE_LICENSE("GPL and additional rights");
261 static int DRM(setup)( drm_device_t *dev )
263 int i;
265 DRIVER_PRESETUP();
266 atomic_set( &dev->ioctl_count, 0 );
267 atomic_set( &dev->vma_count, 0 );
268 dev->buf_use = 0;
269 atomic_set( &dev->buf_alloc, 0 );
271 #if __HAVE_DMA
272 i = DRM(dma_setup)( dev );
273 if ( i < 0 )
274 return i;
275 #endif
277 dev->counters = 6 + __HAVE_COUNTERS;
278 dev->types[0] = _DRM_STAT_LOCK;
279 dev->types[1] = _DRM_STAT_OPENS;
280 dev->types[2] = _DRM_STAT_CLOSES;
281 dev->types[3] = _DRM_STAT_IOCTLS;
282 dev->types[4] = _DRM_STAT_LOCKS;
283 dev->types[5] = _DRM_STAT_UNLOCKS;
284 #ifdef __HAVE_COUNTER6
285 dev->types[6] = __HAVE_COUNTER6;
286 #endif
287 #ifdef __HAVE_COUNTER7
288 dev->types[7] = __HAVE_COUNTER7;
289 #endif
290 #ifdef __HAVE_COUNTER8
291 dev->types[8] = __HAVE_COUNTER8;
292 #endif
293 #ifdef __HAVE_COUNTER9
294 dev->types[9] = __HAVE_COUNTER9;
295 #endif
296 #ifdef __HAVE_COUNTER10
297 dev->types[10] = __HAVE_COUNTER10;
298 #endif
299 #ifdef __HAVE_COUNTER11
300 dev->types[11] = __HAVE_COUNTER11;
301 #endif
302 #ifdef __HAVE_COUNTER12
303 dev->types[12] = __HAVE_COUNTER12;
304 #endif
305 #ifdef __HAVE_COUNTER13
306 dev->types[13] = __HAVE_COUNTER13;
307 #endif
308 #ifdef __HAVE_COUNTER14
309 dev->types[14] = __HAVE_COUNTER14;
310 #endif
311 #ifdef __HAVE_COUNTER15
312 dev->types[14] = __HAVE_COUNTER14;
313 #endif
315 for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
316 atomic_set( &dev->counts[i], 0 );
318 for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
319 dev->magiclist[i].head = NULL;
320 dev->magiclist[i].tail = NULL;
323 dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
324 DRM_MEM_MAPS);
325 if(dev->maplist == NULL) return -ENOMEM;
326 memset(dev->maplist, 0, sizeof(*dev->maplist));
327 INIT_LIST_HEAD(&dev->maplist->head);
329 dev->vmalist = NULL;
330 dev->sigdata.lock = dev->lock.hw_lock = NULL;
331 init_waitqueue_head( &dev->lock.lock_queue );
332 dev->queue_count = 0;
333 dev->queue_reserved = 0;
334 dev->queue_slots = 0;
335 dev->queuelist = NULL;
336 dev->irq = 0;
337 dev->context_flag = 0;
338 dev->interrupt_flag = 0;
339 dev->dma_flag = 0;
340 dev->last_context = 0;
341 dev->last_switch = 0;
342 dev->last_checked = 0;
343 init_waitqueue_head( &dev->context_wait );
345 dev->ctx_start = 0;
346 dev->lck_start = 0;
348 dev->buf_rp = dev->buf;
349 dev->buf_wp = dev->buf;
350 dev->buf_end = dev->buf + DRM_BSZ;
351 dev->buf_async = NULL;
352 init_waitqueue_head( &dev->buf_readers );
353 init_waitqueue_head( &dev->buf_writers );
355 DRM_DEBUG( "\n" );
358 * The kernel's context could be created here, but is now created
359 * in drm_dma_enqueue. This is more resource-efficient for
360 * hardware that does not do DMA, but may mean that
361 * drm_select_queue fails between the time the interrupt is
362 * initialized and the time the queues are initialized.
364 DRIVER_POSTSETUP();
365 return 0;
/*
 * Tear down a DRM device: frees the device name, unique string, magic
 * hash, AGP memory, vma list, map list and DMA queues, then wakes any
 * lock waiters.  Holds dev->struct_sem for the duration.
 * NOTE(review): several closing braces/comment terminators were dropped
 * from this copy during extraction; logic order is preserved verbatim.
 */
370 * Take down the DRM device.
372 * \param dev DRM device structure.
374 * Frees every resource in \p dev.
376 * \sa drm_device and setup().
378 static int DRM(takedown)( drm_device_t *dev )
380 drm_magic_entry_t *pt, *next;
381 drm_map_t *map;
382 drm_map_list_t *r_list;
383 struct list_head *list, *list_next;
384 drm_vma_entry_t *vma, *vma_next;
385 int i;
387 DRM_DEBUG( "\n" );
389 DRIVER_PRETAKEDOWN();
390 #if __HAVE_DMA_IRQ
391 if ( dev->irq ) DRM(irq_uninstall)( dev );
392 #endif
394 down( &dev->struct_sem );
395 del_timer( &dev->timer );
397 if ( dev->devname ) {
398 DRM(free)( dev->devname, strlen( dev->devname ) + 1,
399 DRM_MEM_DRIVER );
400 dev->devname = NULL;
403 if ( dev->unique ) {
404 DRM(free)( dev->unique, strlen( dev->unique ) + 1,
405 DRM_MEM_DRIVER );
406 dev->unique = NULL;
407 dev->unique_len = 0;
409 /* Clear pid list */
410 for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
411 for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
412 next = pt->next;
413 DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
415 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
418 #if __REALLY_HAVE_AGP
419 /* Clear AGP information */
420 if ( dev->agp ) {
421 drm_agp_mem_t *entry;
422 drm_agp_mem_t *nexte;
424 /* Remove AGP resources, but leave dev->agp
425 intact until drv_cleanup is called. */
426 for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
427 nexte = entry->next;
428 if ( entry->bound ) DRM(unbind_agp)( entry->memory );
429 DRM(free_agp)( entry->memory, entry->pages );
430 DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
432 dev->agp->memory = NULL;
434 if ( dev->agp->acquired ) DRM(agp_do_release)();
436 dev->agp->acquired = 0;
437 dev->agp->enabled = 0;
439 #endif
441 /* Clear vma list (only built for debugging) */
442 if ( dev->vmalist ) {
443 for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
444 vma_next = vma->next;
445 DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
447 dev->vmalist = NULL;
450 if( dev->maplist ) {
451 for(list = dev->maplist->head.next;
452 list != &dev->maplist->head;
453 list = list_next) {
454 list_next = list->next;
455 r_list = (drm_map_list_t *)list;
456 map = r_list->map;
457 DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
458 if(!map) continue;
460 switch ( map->type ) {
461 case _DRM_REGISTERS:
462 case _DRM_FRAME_BUFFER:
463 #if __REALLY_HAVE_MTRR
464 if ( map->mtrr >= 0 ) {
465 int retcode;
466 retcode = mtrr_del( map->mtrr,
467 map->offset,
468 map->size );
469 DRM_DEBUG( "mtrr_del=%d\n", retcode );
471 #endif
472 DRM(ioremapfree)( map->handle, map->size, dev );
473 break;
474 case _DRM_SHM:
475 vfree(map->handle);
476 break;
478 case _DRM_AGP:
479 /* Do nothing here, because this is all
480 * handled in the AGP/GART driver.
482 break;
483 case _DRM_SCATTER_GATHER:
484 /* Handle it, but do nothing, if HAVE_SG
485 * isn't defined.
487 #if __HAVE_SG
488 if(dev->sg) {
489 DRM(sg_cleanup)(dev->sg);
490 dev->sg = NULL;
492 #endif
493 break;
495 DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
497 DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
498 dev->maplist = NULL;
501 #if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
502 if ( dev->queuelist ) {
503 for ( i = 0 ; i < dev->queue_count ; i++ ) {
504 #if __HAVE_DMA_WAITLIST
505 DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
506 #endif
507 if ( dev->queuelist[i] ) {
508 DRM(free)( dev->queuelist[i],
509 sizeof(*dev->queuelist[0]),
510 DRM_MEM_QUEUES );
511 dev->queuelist[i] = NULL;
514 DRM(free)( dev->queuelist,
515 dev->queue_slots * sizeof(*dev->queuelist),
516 DRM_MEM_QUEUES );
517 dev->queuelist = NULL;
519 dev->queue_count = 0;
520 #endif
522 #if __HAVE_DMA
523 DRM(dma_takedown)( dev );
524 #endif
525 if ( dev->lock.hw_lock ) {
526 dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
527 dev->lock.filp = 0;
528 wake_up_interruptible( &dev->lock.lock_queue );
530 up( &dev->struct_sem );
532 return 0;
/*
 * Count device instances to initialize: the driver may supply either a
 * DRIVER_COUNT_CARDS() hook or a DRIVER_CARD_LIST PCI id table
 * (0xffff entries are wildcards); otherwise DRIVER_NUM_CARDS is used.
 */
536 * Figure out how many instances to initialize.
538 * \return number of cards found.
540 * Searches for every PCI card in \c DRIVER_CARD_LIST with matching vendor and device ids.
542 static int drm_count_cards(void)
544 int num = 0;
545 #if defined(DRIVER_CARD_LIST)
546 int i;
547 drm_pci_list_t *l;
548 u16 device, vendor;
549 struct pci_dev *pdev = NULL;
550 #endif
552 DRM_DEBUG( "\n" );
554 #if defined(DRIVER_COUNT_CARDS)
555 num = DRIVER_COUNT_CARDS();
556 #elif defined(DRIVER_CARD_LIST)
557 for (i = 0, l = DRIVER_CARD_LIST; l[i].vendor != 0; i++) {
558 pdev = NULL;
559 vendor = l[i].vendor;
560 device = l[i].device;
561 if(device == 0xffff) device = PCI_ANY_ID;
562 if(vendor == 0xffff) vendor = PCI_ANY_ID;
563 while ((pdev = pci_find_device(vendor, device, pdev))) {
564 num++;
567 #else
568 num = DRIVER_NUM_CARDS;
569 #endif
570 DRM_DEBUG("numdevs = %d\n", num);
571 return num;
/*
 * Module entry point: allocates the device/minor arrays, then per device
 * registers a stub minor, initializes AGP/MTRR and the context bitmap.
 * NOTE(review): on stub_register/ctxbitmap failure mid-loop, the
 * device and minor arrays and earlier minors are not freed here --
 * looks like a leak on the error path; verify against later kernels.
 */
575 * Module initialization. Called via init_module at module load time, or via
576 * linux/init/main.c (this is not currently supported).
578 * \return zero on success or a negative number on failure.
580 * Allocates and initialize an array of drm_device structures, and attempts to
581 * initialize all available devices, using consecutive minors, registering the
582 * stubs and initializing the AGP device.
584 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
585 * after the initialization for driver customization.
587 static int __init drm_init( void )
590 drm_device_t *dev;
591 int i;
592 #if __HAVE_CTX_BITMAP
593 int retcode;
594 #endif
595 DRM_DEBUG( "\n" );
597 #ifdef MODULE
598 DRM(parse_options)( drm_opts );
599 #endif
601 DRM(numdevs) = drm_count_cards();
602 /* Force at least one instance. */
603 if (DRM(numdevs) <= 0)
604 DRM(numdevs) = 1;
606 DRM(device) = kmalloc(sizeof(*DRM(device)) * DRM(numdevs), GFP_KERNEL);
607 if (!DRM(device)) {
608 return -ENOMEM;
610 DRM(minor) = kmalloc(sizeof(*DRM(minor)) * DRM(numdevs), GFP_KERNEL);
611 if (!DRM(minor)) {
612 kfree(DRM(device));
613 return -ENOMEM;
616 DRIVER_PREINIT();
618 DRM(mem_init)();
620 for (i = 0; i < DRM(numdevs); i++) {
621 dev = &(DRM(device)[i]);
622 memset( (void *)dev, 0, sizeof(*dev) );
623 dev->count_lock = SPIN_LOCK_UNLOCKED;
624 init_timer( &dev->timer );
625 sema_init( &dev->struct_sem, 1 );
627 if ((DRM(minor)[i] = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
628 return -EPERM;
629 dev->device = MKDEV(DRM_MAJOR, DRM(minor)[i] );
630 dev->name = DRIVER_NAME;
632 #if __REALLY_HAVE_AGP
633 dev->agp = DRM(agp_init)();
634 #if __MUST_HAVE_AGP
635 if ( dev->agp == NULL ) {
636 DRM_ERROR( "Cannot initialize the agpgart module.\n" );
637 DRM(stub_unregister)(DRM(minor)[i]);
638 DRM(takedown)( dev );
639 return -ENOMEM;
641 #endif
642 #if __REALLY_HAVE_MTRR
643 if (dev->agp)
644 dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
645 dev->agp->agp_info.aper_size*1024*1024,
646 MTRR_TYPE_WRCOMB,
647 1 );
648 #endif
649 #endif
651 #if __HAVE_CTX_BITMAP
652 retcode = DRM(ctxbitmap_init)( dev );
653 if( retcode ) {
654 DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
655 DRM(stub_unregister)(DRM(minor)[i]);
656 DRM(takedown)( dev );
657 return retcode;
659 #endif
660 DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
661 DRIVER_NAME,
662 DRIVER_MAJOR,
663 DRIVER_MINOR,
664 DRIVER_PATCHLEVEL,
665 DRIVER_DATE,
666 DRM(minor)[i] );
669 DRIVER_POSTINIT();
671 return 0;
/*
 * Module exit: walks the device array in reverse, unregistering each
 * stub minor, releasing MTRR/AGP state and running takedown(), then
 * frees the minor and device arrays.
 */
675 * Called via cleanup_module() at module unload time.
677 * Cleans up all DRM device, calling takedown().
679 * \sa drm_init().
681 static void __exit drm_cleanup( void )
683 drm_device_t *dev;
684 int i;
686 DRM_DEBUG( "\n" );
688 for (i = DRM(numdevs) - 1; i >= 0; i--) {
689 dev = &(DRM(device)[i]);
690 if ( DRM(stub_unregister)(DRM(minor)[i]) ) {
691 DRM_ERROR( "Cannot unload module\n" );
692 } else {
693 DRM_DEBUG("minor %d unregistered\n", DRM(minor)[i]);
694 if (i == 0) {
695 DRM_INFO( "Module unloaded\n" );
698 #if __HAVE_CTX_BITMAP
699 DRM(ctxbitmap_cleanup)( dev );
700 #endif
702 #if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
703 if ( dev->agp && dev->agp->agp_mtrr >= 0) {
704 int retval;
705 retval = mtrr_del( dev->agp->agp_mtrr,
706 dev->agp->agp_info.aper_base,
707 dev->agp->agp_info.aper_size*1024*1024 );
708 DRM_DEBUG( "mtrr_del=%d\n", retval );
710 #endif
712 DRM(takedown)( dev );
714 #if __REALLY_HAVE_AGP
715 if ( dev->agp ) {
716 DRM(agp_uninit)();
717 DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
718 dev->agp = NULL;
720 #endif
722 DRIVER_POSTCLEANUP();
723 kfree(DRM(minor));
724 kfree(DRM(device));
725 DRM(numdevs) = 0;
728 module_init( drm_init );
729 module_exit( drm_cleanup );
/*
 * DRM_IOCTL_VERSION handler.  The DRM_COPY macro copies a bounded
 * string to the user buffer while reporting the full source length in
 * the corresponding _len field (so the caller can size a retry).
 * Note: DRM_COPY evaluates "value" twice -- safe here because all
 * arguments are string-literal macros.
 */
733 * Get version information
735 * \param inode device inode.
736 * \param filp file pointer.
737 * \param cmd command.
738 * \param arg user argument, pointing to a drm_version structure.
739 * \return zero on success or negative number on failure.
741 * Fills in the version information in \p arg.
743 int DRM(version)( struct inode *inode, struct file *filp,
744 unsigned int cmd, unsigned long arg )
746 drm_version_t version;
747 int len;
749 if ( copy_from_user( &version,
750 (drm_version_t *)arg,
751 sizeof(version) ) )
752 return -EFAULT;
754 #define DRM_COPY( name, value ) \
755 len = strlen( value ); \
756 if ( len > name##_len ) len = name##_len; \
757 name##_len = strlen( value ); \
758 if ( len && name ) { \
759 if ( copy_to_user( name, value, len ) ) \
760 return -EFAULT; \
763 version.version_major = DRIVER_MAJOR;
764 version.version_minor = DRIVER_MINOR;
765 version.version_patchlevel = DRIVER_PATCHLEVEL;
767 DRM_COPY( version.name, DRIVER_NAME );
768 DRM_COPY( version.date, DRIVER_DATE );
769 DRM_COPY( version.desc, DRIVER_DESC );
771 if ( copy_to_user( (drm_version_t *)arg,
772 &version,
773 sizeof(version) ) )
774 return -EFAULT;
775 return 0;
/*
 * fops->open handler: maps the inode's minor to a device instance and
 * triggers DRM(setup) on the 0 -> 1 open_count transition (count_lock
 * protects the transition check).
 */
779 * Open file.
781 * \param inode device inode
782 * \param filp file pointer.
783 * \return zero on success or a negative number on failure.
785 * Searches the DRM device with the same minor number, calls open_helper(), and
786 * increments the device open count. If the open count was previous at zero,
787 * i.e., it's the first that the device is open, then calls setup().
789 int DRM(open)( struct inode *inode, struct file *filp )
791 drm_device_t *dev = NULL;
792 int retcode = 0;
793 int i;
795 for (i = 0; i < DRM(numdevs); i++) {
796 if (minor(inode->i_rdev) == DRM(minor)[i]) {
797 dev = &(DRM(device)[i]);
798 break;
801 if (!dev) {
802 return -ENODEV;
805 retcode = DRM(open_helper)( inode, filp, dev );
806 if ( !retcode ) {
807 atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
808 spin_lock( &dev->count_lock );
809 if ( !dev->open_count++ ) {
810 spin_unlock( &dev->count_lock );
811 return DRM(setup)( dev );
813 spin_unlock( &dev->count_lock );
816 return retcode;
/*
 * fops->release handler: frees a held hardware lock (re-taking it as
 * the kernel context to reclaim buffers when __HAVE_RELEASE), unlinks
 * and frees the per-file private data, and runs takedown() on the
 * last close.  Runs under the BKL (lock_kernel/unlock_kernel).
 */
820 * Release file.
822 * \param inode device inode
823 * \param filp file pointer.
824 * \return zero on success or a negative number on failure.
826 * If the hardware lock is held then free it, and take it again for the kernel
827 * context since it's necessary to reclaim buffers. Unlink the file private
828 * data from its list and free it. Decreases the open count and if it reaches
829 * zero calls takedown().
831 int DRM(release)( struct inode *inode, struct file *filp )
833 drm_file_t *priv = filp->private_data;
834 drm_device_t *dev;
835 int retcode = 0;
837 lock_kernel();
838 dev = priv->dev;
840 DRM_DEBUG( "open_count = %d\n", dev->open_count );
842 DRIVER_PRERELEASE();
844 /* ========================================================
845 * Begin inline drm_release
848 DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
849 current->pid, (long)dev->device, dev->open_count );
851 if ( priv->lock_count && dev->lock.hw_lock &&
852 _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
853 dev->lock.filp == filp ) {
854 DRM_DEBUG( "File %p released, freeing lock for context %d\n",
855 filp,
856 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
857 #if __HAVE_RELEASE
858 DRIVER_RELEASE();
859 #endif
860 DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
861 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
863 /* FIXME: may require heavy-handed reset of
864 hardware at this point, possibly
865 processed via a callback to the X
866 server. */
868 #if __HAVE_RELEASE
869 else if ( priv->lock_count && dev->lock.hw_lock ) {
870 /* The lock is required to reclaim buffers */
871 DECLARE_WAITQUEUE( entry, current );
873 add_wait_queue( &dev->lock.lock_queue, &entry );
874 for (;;) {
875 current->state = TASK_INTERRUPTIBLE;
876 if ( !dev->lock.hw_lock ) {
877 /* Device has been unregistered */
878 retcode = -EINTR;
879 break;
881 if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
882 DRM_KERNEL_CONTEXT ) ) {
883 dev->lock.filp = filp;
884 dev->lock.lock_time = jiffies;
885 atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
886 break; /* Got lock */
888 /* Contention */
889 schedule();
890 if ( signal_pending( current ) ) {
891 retcode = -ERESTARTSYS;
892 break;
895 current->state = TASK_RUNNING;
896 remove_wait_queue( &dev->lock.lock_queue, &entry );
897 if( !retcode ) {
898 DRIVER_RELEASE();
899 DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
900 DRM_KERNEL_CONTEXT );
903 #elif __HAVE_DMA
904 DRM(reclaim_buffers)( filp );
905 #endif
907 DRM(fasync)( -1, filp, 0 );
909 down( &dev->struct_sem );
910 if ( priv->remove_auth_on_close == 1 ) {
911 drm_file_t *temp = dev->file_first;
912 while ( temp ) {
913 temp->authenticated = 0;
914 temp = temp->next;
917 if ( priv->prev ) {
918 priv->prev->next = priv->next;
919 } else {
920 dev->file_first = priv->next;
922 if ( priv->next ) {
923 priv->next->prev = priv->prev;
924 } else {
925 dev->file_last = priv->prev;
927 up( &dev->struct_sem );
929 DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
931 /* ========================================================
932 * End inline drm_release
935 atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
936 spin_lock( &dev->count_lock );
937 if ( !--dev->open_count ) {
938 if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
939 DRM_ERROR( "Device busy: %d %d\n",
940 atomic_read( &dev->ioctl_count ),
941 dev->blocked );
942 spin_unlock( &dev->count_lock );
943 unlock_kernel();
944 return -EBUSY;
946 spin_unlock( &dev->count_lock );
947 unlock_kernel();
948 return DRM(takedown)( dev );
950 spin_unlock( &dev->count_lock );
952 unlock_kernel();
954 return retcode;
957 /**
958 * Called whenever a process performs an ioctl on /dev/drm.
960 * \param inode device inode.
961 * \param filp file pointer.
962 * \param cmd command.
963 * \param arg user argument.
964 * \return zero on success or negative number on failure.
966 * Looks up the ioctl function in the ::ioctls table, checking for root
967 * privileges if so required, and dispatches to the respective function.
969 int DRM(ioctl)( struct inode *inode, struct file *filp,
970 unsigned int cmd, unsigned long arg )
972 drm_file_t *priv = filp->private_data;
973 drm_device_t *dev = priv->dev;
974 drm_ioctl_desc_t *ioctl;
975 drm_ioctl_t *func;
976 int nr = DRM_IOCTL_NR(cmd);
977 int retcode = 0;
/* ioctl_count tracks in-flight ioctls so takedown can refuse a busy device. */
979 atomic_inc( &dev->ioctl_count );
980 atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
981 ++priv->ioctl_count;
983 DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
984 current->pid, cmd, nr, (long)dev->device,
985 priv->authenticated );
987 if ( nr >= DRIVER_IOCTL_COUNT ) {
988 retcode = -EINVAL;
989 } else {
990 ioctl = &DRM(ioctls)[nr];
991 func = ioctl->func;
993 if ( !func ) {
994 DRM_DEBUG( "no function\n" );
995 retcode = -EINVAL;
996 } else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
997 ( ioctl->auth_needed && !priv->authenticated ) ) {
998 retcode = -EACCES;
999 } else {
1000 retcode = func( inode, filp, cmd, arg );
1004 atomic_dec( &dev->ioctl_count );
1005 return retcode;
1008 /**
1009 * Lock ioctl.
1011 * \param inode device inode.
1012 * \param filp file pointer.
1013 * \param cmd command.
1014 * \param arg user argument, pointing to a drm_lock structure.
1015 * \return zero on success or negative number on failure.
1017 * Add the current task to the lock wait queue, and attempt to take the lock.
1019 int DRM(lock)( struct inode *inode, struct file *filp,
1020 unsigned int cmd, unsigned long arg )
1022 drm_file_t *priv = filp->private_data;
1023 drm_device_t *dev = priv->dev;
1024 DECLARE_WAITQUEUE( entry, current );
1025 drm_lock_t lock;
1026 int ret = 0;
1027 #if __HAVE_MULTIPLE_DMA_QUEUES
1028 drm_queue_t *q;
1029 #endif
1031 ++priv->lock_count;
1033 if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
1034 return -EFAULT;
/* Userspace may never claim the kernel's reserved context. */
1036 if ( lock.context == DRM_KERNEL_CONTEXT ) {
1037 DRM_ERROR( "Process %d using kernel context %d\n",
1038 current->pid, lock.context );
1039 return -EINVAL;
1042 DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
1043 lock.context, current->pid,
1044 dev->lock.hw_lock->lock, lock.flags );
1046 #if __HAVE_DMA_QUEUE
1047 if ( lock.context < 0 )
1048 return -EINVAL;
1049 #elif __HAVE_MULTIPLE_DMA_QUEUES
1050 if ( lock.context < 0 || lock.context >= dev->queue_count )
1051 return -EINVAL;
1052 q = dev->queuelist[lock.context];
1053 #endif
1055 #if __HAVE_DMA_FLUSH
1056 ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
1057 #endif
/* Classic sleep-wait loop: set INTERRUPTIBLE, retry lock_take, schedule
 * on contention, abort on signal or device unregistration. */
1058 if ( !ret ) {
1059 add_wait_queue( &dev->lock.lock_queue, &entry );
1060 for (;;) {
1061 current->state = TASK_INTERRUPTIBLE;
1062 if ( !dev->lock.hw_lock ) {
1063 /* Device has been unregistered */
1064 ret = -EINTR;
1065 break;
1067 if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
1068 lock.context ) ) {
1069 dev->lock.filp = filp;
1070 dev->lock.lock_time = jiffies;
1071 atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
1072 break; /* Got lock */
1075 /* Contention */
1076 schedule();
1077 if ( signal_pending( current ) ) {
1078 ret = -ERESTARTSYS;
1079 break;
1082 current->state = TASK_RUNNING;
1083 remove_wait_queue( &dev->lock.lock_queue, &entry );
1086 #if __HAVE_DMA_FLUSH
1087 DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
1088 #endif
/* While the lock is held, block job-control signals so the holder
 * cannot be stopped with the hardware lock held. */
1090 if ( !ret ) {
1091 sigemptyset( &dev->sigmask );
1092 sigaddset( &dev->sigmask, SIGSTOP );
1093 sigaddset( &dev->sigmask, SIGTSTP );
1094 sigaddset( &dev->sigmask, SIGTTIN );
1095 sigaddset( &dev->sigmask, SIGTTOU );
1096 dev->sigdata.context = lock.context;
1097 dev->sigdata.lock = dev->lock.hw_lock;
1098 block_all_signals( DRM(notifier),
1099 &dev->sigdata, &dev->sigmask );
1101 #if __HAVE_DMA_READY
1102 if ( lock.flags & _DRM_LOCK_READY ) {
1103 DRIVER_DMA_READY();
1105 #endif
1106 #if __HAVE_DMA_QUIESCENT
1107 if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
1108 DRIVER_DMA_QUIESCENT();
1110 #endif
1111 #if __HAVE_KERNEL_CTX_SWITCH
1112 if ( dev->last_context != lock.context ) {
1113 DRM(context_switch)(dev, dev->last_context,
1114 lock.context);
1116 #endif
1119 DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
1121 return ret;
1124 /**
1125 * Unlock ioctl.
1127 * \param inode device inode.
1128 * \param filp file pointer.
1129 * \param cmd command.
1130 * \param arg user argument, pointing to a drm_lock structure.
1131 * \return zero on success or negative number on failure.
1133 * Transfer and free the lock.
1135 int DRM(unlock)( struct inode *inode, struct file *filp,
1136 unsigned int cmd, unsigned long arg )
1138 drm_file_t *priv = filp->private_data;
1139 drm_device_t *dev = priv->dev;
1140 drm_lock_t lock;
1142 if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
1143 return -EFAULT;
1145 if ( lock.context == DRM_KERNEL_CONTEXT ) {
1146 DRM_ERROR( "Process %d using kernel context %d\n",
1147 current->pid, lock.context );
1148 return -EINVAL;
1151 atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
/* With kernel context switching the lock word is swapped to the caller's
 * context via cmpxchg instead of a transfer to the kernel context. */
1153 #if __HAVE_KERNEL_CTX_SWITCH
1154 /* We no longer really hold it, but if we are the next
1155 * agent to request it then we should just be able to
1156 * take it immediately and not eat the ioctl.
1158 dev->lock.filp = 0;
1160 __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
1161 unsigned int old, new, prev, ctx;
1163 ctx = lock.context;
1164 do {
1165 old = *plock;
1166 new = ctx;
1167 prev = cmpxchg(plock, old, new);
1168 } while (prev != old);
1170 wake_up_interruptible(&dev->lock.lock_queue);
1171 #else
1172 DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
1173 DRM_KERNEL_CONTEXT );
1174 #if __HAVE_DMA_SCHEDULE
1175 DRM(dma_schedule)( dev, 1 );
1176 #endif
1178 /* FIXME: Do we ever really need to check this???
1180 if ( 1 /* !dev->context_flag */ ) {
1181 if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
1182 DRM_KERNEL_CONTEXT ) ) {
1183 DRM_ERROR( "\n" );
1186 #endif /* !__HAVE_KERNEL_CTX_SWITCH */
1188 unblock_all_signals();
1189 return 0;