/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */
/*
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */
#include <machine/limits.h>
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/drm_sarea.h"
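/*
 * drm_debug_flag gates the DRM_DEBUG() diagnostics used throughout this
 * file; building with DRM_DEBUG_DEFAULT_ON turns them on by default.
 */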
#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = 1;
#else
int drm_debug_flag = 0;
#endif
static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);
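/*
 * Map a unit (minor) number back to the per-device softc; drm_open() uses
 * this to find the drm_device behind the character device being opened.
 */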
#define DRIVER_SOFTC(unit) \
        ((struct drm_device *)devclass_get_softc(drm_devclass, unit))
MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
MODULE_DEPEND(drm, mem, 1, 1, 1);
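/*
 * Core ioctl descriptor table, indexed by ioctl number.  The flags on each
 * entry restrict who may invoke it: DRM_AUTH requires an authenticated
 * client, DRM_MASTER requires the DRM master, and DRM_ROOT_ONLY requires
 * root; drm_ioctl() checks these flags before dispatching to the handler.
 */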
static drm_ioctl_desc_t drm_ioctls[256] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
static struct dev_ops drm_cdevsw = {
        { "drm", 145, D_TRACKCLOSE },
        .d_open =       drm_open,
        .d_ioctl =      drm_ioctl,
};
int drm_msi = 1;        /* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
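/*
 * Devices on this list are assumed to have unreliable MSI support;
 * drm_attach() only enables MSI when the device is not listed here.
 */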
static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
        {0x8086, 0x2772}, /* Intel i945G   */
        {0x8086, 0x27A2}, /* Intel i945GM  */
        {0x8086, 0x27AE}, /* Intel i945GME */
        {0, 0}
};
static int drm_msi_is_blacklisted(int vendor, int device)
{
        int i;

        for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
                if ((drm_msi_blacklist[i].vendor == vendor) &&
                    (drm_msi_blacklist[i].device == device)) {
                        return 1;
                }
        }

        return 0;
}
int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
        drm_pci_id_list_t *id_entry;
        int vendor, device;
        device_t realdev;

        if (!strcmp(device_get_name(kdev), "drmsub"))
                realdev = device_get_parent(kdev);
        else
                realdev = kdev;
        vendor = pci_get_vendor(realdev);
        device = pci_get_device(realdev);

        if (pci_get_class(kdev) != PCIC_DISPLAY
            || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
                return ENXIO;

        id_entry = drm_find_description(vendor, device, idlist);
        if (id_entry != NULL) {
                if (!device_get_desc(kdev)) {
                        DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
                        device_set_desc(kdev, id_entry->name);
                }
                return 0;
        }

        return ENXIO;
}
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
        struct drm_device *dev;
        drm_pci_id_list_t *id_entry;
        int unit, msicount;

        unit = device_get_unit(kdev);
        dev = device_get_softc(kdev);

        if (!strcmp(device_get_name(kdev), "drmsub"))
                dev->device = device_get_parent(kdev);
        else
                dev->device = kdev;

        dev_ops_add(&drm_cdevsw, -1, unit);
        dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
            DRM_DEV_MODE, "dri/card%d", unit);

        dev->pci_bus = pci_get_bus(dev->device);
        dev->pci_slot = pci_get_slot(dev->device);
        dev->pci_func = pci_get_function(dev->device);

        dev->pci_vendor = pci_get_vendor(dev->device);
        dev->pci_device = pci_get_device(dev->device);

        if (drm_msi &&
            !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
                msicount = pci_msi_count(dev->device);
                DRM_DEBUG("MSI count = %d\n", msicount);

                if (pci_alloc_msi(dev->device, &msicount) == 0) {
                        DRM_INFO("MSI enabled %d message(s)\n", msicount);
                        dev->msi_enabled = 1;
                }
        }

        dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
            &dev->irqrid, RF_SHAREABLE);
        if (dev->irqr == NULL)
                return ENOENT;

        dev->irq = (int) rman_get_start(dev->irqr);

        DRM_SPININIT(&dev->dev_lock, "drmdev");
        lwkt_serialize_init(&dev->irq_lock);
        DRM_SPININIT(&dev->vbl_lock, "drmvbl");
        DRM_SPININIT(&dev->drw_lock, "drmdrw");

        id_entry = drm_find_description(dev->pci_vendor,
            dev->pci_device, idlist);
        dev->id_entry = id_entry;

        return drm_load(dev);
}
int drm_detach(device_t kdev)
{
        struct drm_device *dev;

        dev = device_get_softc(kdev);

        drm_unload(dev);

        bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid, dev->irqr);

        if (dev->msi_enabled) {
                pci_release_msi(dev->device);
                DRM_INFO("MSI released\n");
        }

        return 0;
}
#define DRM_DEV_NAME "drm"

devclass_t drm_devclass;
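/*
 * Look up a (vendor, device) pair in a driver's PCI ID list.  An entry with
 * a device ID of 0 acts as a wildcard and matches any device from that
 * vendor.
 */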
drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
        int i;

        for (i = 0; idlist[i].vendor != 0; i++) {
                if ((idlist[i].vendor == vendor) &&
                    ((idlist[i].device == device) ||
                    (idlist[i].device == 0))) {
                        return &idlist[i];
                }
        }

        return NULL;
}
static int drm_firstopen(struct drm_device *dev)
{
        drm_local_map_t *map;
        int i;

        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        /* prebuild the SAREA */
        i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
            _DRM_CONTAINS_LOCK, &map);
        if (i != 0)
                return i;

        if (dev->driver->firstopen)
                dev->driver->firstopen(dev);

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
                i = drm_dma_setup(dev);
                if (i != 0)
                        return i;
        }

        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

        dev->lock.lock_queue = 0;
        dev->irq_enabled = 0;
        dev->context_flag = 0;
        dev->last_context = 0;
        dev->buf_sigio = NULL;

        return 0;
}
static int drm_lastclose(struct drm_device *dev)
{
        drm_magic_entry_t *pt, *next;
        drm_local_map_t *map, *mapsave;
        int i;

        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        if (dev->driver->lastclose != NULL)
                dev->driver->lastclose(dev);

        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        free(dev->unique, DRM_MEM_DRIVER);
        dev->unique = NULL;

        /* Clear the magic hash table. */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                for (pt = dev->magiclist[i].head; pt; pt = next) {
                        next = pt->next;
                        free(pt, DRM_MEM_MAGIC);
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }

        drm_drawable_free_all(dev);

        /* Clear AGP information */
        if (dev->agp != NULL) {
                drm_agp_mem_t *entry;
                drm_agp_mem_t *nexte;

                /* Remove AGP resources, but leave dev->agp intact until
                 * drm_unload is called.
                 */
                for (entry = dev->agp->memory; entry; entry = nexte) {
                        nexte = entry->next;
                        if (entry->bound)
                                drm_agp_unbind_memory(entry->handle);
                        drm_agp_free_memory(entry->handle);
                        free(entry, DRM_MEM_AGPLISTS);
                }
                dev->agp->memory = NULL;

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }

        if (dev->sg != NULL) {
                drm_sg_cleanup(dev->sg);
                dev->sg = NULL;
        }

        TAILQ_FOREACH_MUTABLE(map, &dev->maplist, link, mapsave) {
                if (!(map->flags & _DRM_DRIVER))
                        drm_rmmap(dev, map);
        }

        drm_dma_takedown(dev);
        if (dev->lock.hw_lock) {
                dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.file_priv = NULL;
                DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
        }

        return 0;
}
static int drm_load(struct drm_device *dev)
{
        int i, retcode;

        TAILQ_INIT(&dev->maplist);

        drm_sysctl_init(dev);
        TAILQ_INIT(&dev->files);

        dev->types[0] = _DRM_STAT_LOCK;
        dev->types[1] = _DRM_STAT_OPENS;
        dev->types[2] = _DRM_STAT_CLOSES;
        dev->types[3] = _DRM_STAT_IOCTLS;
        dev->types[4] = _DRM_STAT_LOCKS;
        dev->types[5] = _DRM_STAT_UNLOCKS;

        for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

        if (dev->driver->load != NULL) {
                /* Shared code returns -errno. */
                retcode = -dev->driver->load(dev,
                    dev->id_entry->driver_private);
                pci_enable_busmaster(dev->device);
                if (retcode != 0)
                        goto error;
        }

        if (drm_core_has_AGP(dev)) {
                if (drm_device_is_agp(dev))
                        dev->agp = drm_agp_init();
                if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
                    dev->agp == NULL) {
                        DRM_ERROR("Card isn't AGP, or couldn't initialize "
                            "AGP.\n");
                        retcode = ENOMEM;
                        goto error;
                }
                if (dev->agp != NULL) {
                        if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
                            dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
                                dev->agp->mtrr = 1;
                }
        }

        retcode = drm_ctxbitmap_init(dev);
        if (retcode != 0) {
                DRM_ERROR("Cannot allocate memory for context bitmap.\n");
                goto error;
        }

        DRM_INFO("Initialized %s %d.%d.%d %s\n",
            dev->driver->name,
            dev->driver->major,
            dev->driver->minor,
            dev->driver->patchlevel,
            dev->driver->date);

        return 0;

error:
        drm_sysctl_cleanup(dev);
        drm_lastclose(dev);
        destroy_dev(dev->devnode);

        DRM_SPINUNINIT(&dev->drw_lock);
        DRM_SPINUNINIT(&dev->vbl_lock);
        DRM_SPINUNINIT(&dev->dev_lock);

        return retcode;
}
static void drm_unload(struct drm_device *dev)
{
        int i;

        drm_sysctl_cleanup(dev);
        destroy_dev(dev->devnode);

        drm_ctxbitmap_cleanup(dev);

        if (dev->agp && dev->agp->mtrr) {
                int __unused retcode;

                retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
                    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
                DRM_DEBUG("mtrr_del = %d", retcode);
        }

        drm_vblank_cleanup(dev);

        drm_lastclose(dev);

        /* Clean up PCI resources allocated by drm_bufs.c.  We're not really
         * worried about resource consumption while the DRM is inactive (between
         * lastclose and firstopen or unload) because these aren't actually
         * taking up KVA, just keeping the PCI resource allocated.
         */
        for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
                if (dev->pcir[i] == NULL)
                        continue;
                bus_release_resource(dev->device, SYS_RES_MEMORY,
                    dev->pcirid[i], dev->pcir[i]);
                dev->pcir[i] = NULL;
        }

        if (dev->agp != NULL) {
                free(dev->agp, DRM_MEM_AGPLISTS);
                dev->agp = NULL;
        }

        if (dev->driver->unload != NULL)
                dev->driver->unload(dev);

        pci_disable_busmaster(dev->device);

        DRM_SPINUNINIT(&dev->drw_lock);
        DRM_SPINUNINIT(&dev->vbl_lock);
        DRM_SPINUNINIT(&dev->dev_lock);
}
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_version *version = data;
        int len;
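/*
 * Copy a driver-supplied string into the corresponding user buffer,
 * truncating to the length the caller passed in and reporting the string's
 * full length back through name##_len.
 */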
#define DRM_COPY( name, value )                                         \
        len = strlen( value );                                          \
        if ( len > name##_len ) len = name##_len;                       \
        name##_len = strlen( value );                                   \
        if ( len && name ) {                                            \
                if ( DRM_COPY_TO_USER( name, value, len ) )             \
                        return EFAULT;                                  \
        }
        version->version_major          = dev->driver->major;
        version->version_minor          = dev->driver->minor;
        version->version_patchlevel     = dev->driver->patchlevel;

        DRM_COPY(version->name, dev->driver->name);
        DRM_COPY(version->date, dev->driver->date);
        DRM_COPY(version->desc, dev->driver->desc);

        return 0;
}
int drm_open(struct dev_open_args *ap)
{
        struct cdev *kdev = ap->a_head.a_dev;
        int flags = ap->a_oflags;
        int fmt = 0;
        struct thread *p = curthread;
        struct drm_device *dev = NULL;
        int retcode = 0;

        dev = DRIVER_SOFTC(minor(kdev));

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        retcode = drm_open_helper(kdev, flags, fmt, p, dev);

        if (!retcode) {
                atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
                device_busy(dev->device);
                if (!dev->open_count++)
                        retcode = drm_firstopen(dev);
        }

        return retcode;
}
void drm_close(void *data)
{
        struct drm_file *file_priv = data;
        struct drm_device *dev = file_priv->dev;
        int retcode = 0;

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        if (--file_priv->refs != 0)
                return;

        if (dev->driver->preclose != NULL)
                dev->driver->preclose(dev, file_priv);

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
            DRM_CURRENTPID, (long)dev->device, dev->open_count);

        if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
            && dev->lock.file_priv == file_priv) {
                DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
                    DRM_CURRENTPID,
                    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
                if (dev->driver->reclaim_buffers_locked != NULL)
                        dev->driver->reclaim_buffers_locked(dev, file_priv);

                drm_lock_free(&dev->lock,
                    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                /* FIXME: may require heavy-handed reset of
                   hardware at this point, possibly
                   processed via a callback to the X
                   server. */
        } else if (dev->driver->reclaim_buffers_locked != NULL &&
            dev->lock.hw_lock != NULL) {
                /* The lock is required to reclaim buffers */
                for (;;) {
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                retcode = EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
                                dev->lock.file_priv = file_priv;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                                break;  /* Got lock */
                        }
                        /* Contention */
                        tsleep_interlock((void *)&dev->lock.lock_queue);
                        retcode = tsleep((void *)&dev->lock.lock_queue,
                            PCATCH, "drmlk2", 0);
                        if (retcode)
                                break;
                }
                if (retcode == 0) {
                        dev->driver->reclaim_buffers_locked(dev, file_priv);
                        drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
                }
        }

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !dev->driver->reclaim_buffers_locked)
                drm_reclaim_buffers(dev, file_priv);

        funsetown(dev->buf_sigio);

        if (dev->driver->postclose != NULL)
                dev->driver->postclose(dev, file_priv);
        TAILQ_REMOVE(&dev->files, file_priv, link);
        free(file_priv, DRM_MEM_FILES);

        /* ========================================================
         * End inline drm_release
         */

        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        device_unbusy(dev->device);
        if (--dev->open_count == 0)
                retcode = drm_lastclose(dev);
}
/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
 */
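/*
 * Requests are looked up in the core drm_ioctls table first; numbers at or
 * above DRM_COMMAND_BASE with no core handler fall through to the driver's
 * own ioctl table.  Each descriptor's DRM_ROOT_ONLY, DRM_AUTH and DRM_MASTER
 * flags are checked before the handler is called.
 */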
int drm_ioctl(struct dev_ioctl_args *ap)
{
        struct cdev *kdev = ap->a_head.a_dev;
        u_long cmd = ap->a_cmd;
        caddr_t data = ap->a_data;
        struct thread *p = curthread;
        struct drm_device *dev = drm_get_device_from_kdev(kdev);
        int retcode = 0;
        drm_ioctl_desc_t *ioctl;
        int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
        int nr = DRM_IOCTL_NR(cmd);
        int is_driver_ioctl = 0;
        struct drm_file *file_priv;

        file_priv = drm_find_file_by_proc(dev, p);
        if (file_priv == NULL) {
                DRM_ERROR("can't find authenticator\n");
                return EINVAL;
        }

        atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
        ++file_priv->ioctl_count;

        DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
            DRM_CURRENTPID, cmd, nr, (long)dev->device,
            file_priv->authenticated);

        switch (cmd) {
        case FIOSETOWN:
                return fsetown(*(int *)data, &dev->buf_sigio);

        case FIOGETOWN:
                *(int *) data = fgetown(dev->buf_sigio);
                return 0;
        }

        if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
                DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
                return EINVAL;
        }

        ioctl = &drm_ioctls[nr];
        /* It's not a core DRM ioctl, try driver-specific. */
        if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
                /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
                nr -= DRM_COMMAND_BASE;
                if (nr > dev->driver->max_ioctl) {
                        DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
                            nr, dev->driver->max_ioctl);
                        return EINVAL;
                }
                ioctl = &dev->driver->ioctls[nr];
                is_driver_ioctl = 1;
        }
        func = ioctl->func;

        if (func == NULL) {
                DRM_DEBUG("no function\n");
                return EINVAL;
        }

        if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
            ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
            ((ioctl->flags & DRM_MASTER) && !file_priv->master))
                return EACCES;

        if (is_driver_ioctl) {
                /* shared code returns -errno */
                retcode = -func(dev, data, file_priv);
        } else {
                retcode = func(dev, data, file_priv);
        }

        if (retcode != 0)
                DRM_DEBUG("    returning %d\n", retcode);

        return retcode;
}
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
        drm_local_map_t *map;

        DRM_SPINLOCK_ASSERT(&dev->dev_lock);
        TAILQ_FOREACH(map, &dev->maplist, link) {
                if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
                        return map;
        }

        return NULL;
}
#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN     0x6400
#define LINUX_IOCTL_DRM_MAX     0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_register_handler, &drm_handler);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_unregister_handler, &drm_handler);
/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN    IOC_OUT
#define LINUX_IOC_OUT   IOC_IN
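/*
 * drm_linux_ioctl() translates a Linux ioctl command into the native
 * encoding: the Linux direction bits are cleared and replaced with the
 * corresponding native IOC_IN/IOC_OUT bits before the request is handed
 * on to ioctl().
 */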
static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args *args)
{
        int error;
        int cmd = args->cmd;

        args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
        if (cmd & LINUX_IOC_IN)
                args->cmd |= IOC_IN;
        if (cmd & LINUX_IOC_OUT)
                args->cmd |= IOC_OUT;

        error = ioctl(p, (struct ioctl_args *)args);

        return error;
}
#endif /* DRM_LINUX */