/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */
/*
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */
#include <machine/limits.h>
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/drm_sarea.h"
#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = 1;
#else
int drm_debug_flag = 0;
#endif
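/*
 * drm_debug_flag gates all DRM_DEBUG() output; building with
 * DRM_DEBUG_DEFAULT_ON only flips the compiled-in default.  Being a plain
 * global, it can also be toggled at run time (presumably through the sysctl
 * tree that drm_sysctl_init() registers in drm_load() below).
 */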
static int	drm_load(struct drm_device *dev);
static void	drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);
#define DRIVER_SOFTC(unit) \
	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
static drm_ioctl_desc_t drm_ioctls[256] = {
	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
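/*
 * The third DRM_IOCTL_DEF() argument is a permission mask: DRM_AUTH requires
 * an authenticated file handle, DRM_MASTER requires the DRM master (normally
 * the X server), and DRM_ROOT_ONLY requires superuser credentials; drm_ioctl()
 * checks these bits before dispatching.  Driver-private ioctls are declared
 * the same way in the driver's own table, indexed from DRM_COMMAND_BASE.  A
 * hypothetical sketch (foo_dma_init and DRM_IOCTL_FOO_INIT are illustrative
 * names, not part of this file):
 *
 *	static drm_ioctl_desc_t foo_ioctls[] = {
 *		DRM_IOCTL_DEF(DRM_IOCTL_FOO_INIT, foo_dma_init,
 *		    DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 *	};
 *
 * drm_ioctl() falls through to dev->driver->ioctls[] for any command number
 * at or above DRM_COMMAND_BASE whose core entry has a NULL func.
 */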
static struct dev_ops drm_cdevsw = {
	{ "drm", 145, D_TRACKCLOSE },
	.d_open =	drm_open,
	.d_close =	drm_close,
	.d_ioctl =	drm_ioctl,
	.d_kqfilter =	drm_kqfilter,
};
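/*
 * D_TRACKCLOSE makes the kernel call d_close for every close() of the cdev
 * rather than only the final one, which is what lets drm_close() below keep
 * dev->open_count and the per-file state in sync.  The 145 appears to be the
 * legacy static DRM character-device major.
 */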
static int drm_msi = 1;	/* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
	{0x8086, 0x2772}, /* Intel i945G	*/
	{0x8086, 0x27A2}, /* Intel i945GM	*/
	{0x8086, 0x27AE}, /* Intel i945GME	*/
	{0, 0}
};
static int drm_msi_is_blacklisted(int vendor, int device)
{
	int i = 0;

	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
		if ((drm_msi_blacklist[i].vendor == vendor) &&
		    (drm_msi_blacklist[i].device == device)) {
			return 1;
		}
	}

	return 0;
}
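/*
 * Usage sketch (not from the original source): because drm_msi is wired up
 * with TUNABLE_INT(), MSI can be disabled before the module loads, e.g. in
 * /boot/loader.conf:
 *
 *	hw.drm.msi="0"	# force classic line-based interrupts
 *
 * Devices on drm_msi_blacklist[] are skipped even when the tunable is left
 * at its default of 1.
 */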
int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
	drm_pci_id_list_t *id_entry;
	int vendor, device;

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);

	if (pci_get_class(kdev) != PCIC_DISPLAY
	    || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
		return ENXIO;

	id_entry = drm_find_description(vendor, device, idlist);
	if (id_entry != NULL) {
		if (!device_get_desc(kdev)) {
			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
			device_set_desc(kdev, id_entry->name);
		}
		return 0;
	}

	return ENXIO;
}
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
	struct drm_device *dev;
	drm_pci_id_list_t *id_entry;
	int unit, msicount;

	unit = device_get_unit(kdev);
	dev = device_get_softc(kdev);

	if (!strcmp(device_get_name(kdev), "drmsub"))
		dev->device = device_get_parent(kdev);
	else
		dev->device = kdev;

	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
	    DRM_DEV_MODE, "dri/card%d", unit);

	dev->pci_bus = pci_get_bus(dev->device);
	dev->pci_slot = pci_get_slot(dev->device);
	dev->pci_func = pci_get_function(dev->device);

	dev->pci_vendor = pci_get_vendor(dev->device);
	dev->pci_device = pci_get_device(dev->device);

	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
		if (drm_msi &&
		    !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
			msicount = pci_msi_count(dev->device);
			DRM_DEBUG("MSI count = %d\n", msicount);

			if (pci_alloc_msi(dev->device, &msicount) == 0) {
				DRM_INFO("MSI enabled %d message(s)\n",
				    msicount);
				dev->msi_enabled = 1;
			}
		}

		dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
		    &dev->irqrid, RF_SHAREABLE);

		dev->irq = (int) rman_get_start(dev->irqr);
	}

	DRM_SPININIT(&dev->dev_lock, "drmdev");
	lwkt_serialize_init(&dev->irq_lock);
	DRM_SPININIT(&dev->vbl_lock, "drmvbl");
	DRM_SPININIT(&dev->drw_lock, "drmdrw");

	id_entry = drm_find_description(dev->pci_vendor,
	    dev->pci_device, idlist);
	dev->id_entry = id_entry;

	return drm_load(dev);
}
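/*
 * Attach order, for reference: resolve the per-unit softc, create the
 * /dev/dri/card%d node, cache the PCI bus/slot/function and vendor/device
 * IDs, set up the interrupt resource (MSI when available and not
 * blacklisted), initialize the spinlocks and the IRQ serializer, look up the
 * matching PCI ID entry, and finally hand off to drm_load().
 */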
int drm_detach(device_t kdev)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);

	drm_unload(dev);

	bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
	    dev->irqr);

	if (dev->msi_enabled) {
		pci_release_msi(dev->device);
		DRM_INFO("MSI released\n");
	}

	return 0;
}
#define DRM_DEV_NAME "drm"

devclass_t drm_devclass;
drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
	int i = 0;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    ((idlist[i].device == device) ||
		    (idlist[i].device == 0))) {
			return &idlist[i];
		}
	}
	return NULL;
}
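/*
 * Illustrative sketch (hypothetical entries; the exact field layout of
 * drm_pci_id_list_t is assumed): a driver hands its PCI ID table to
 * drm_probe()/drm_attach().  A device ID of 0 acts as a wildcard for that
 * vendor, and a zeroed entry terminates the list.
 *
 *	static drm_pci_id_list_t foo_pciidlist[] = {
 *		{0x1002, 0x5144, 0, "Hypothetical vendor 0x1002 device"},
 *		{0, 0, 0, NULL}
 *	};
 */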
static int drm_firstopen(struct drm_device *dev)
{
	drm_local_map_t *map;
	int i;

	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	/* prebuild the SAREA */
	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
	    _DRM_CONTAINS_LOCK, &map);
	if (i != 0)
		return i;

	if (dev->driver->firstopen)
		dev->driver->firstopen(dev);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
		i = drm_dma_setup(dev);
		if (i != 0)
			return i;
	}

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}

	dev->lock.lock_queue = 0;
	dev->irq_enabled = 0;
	dev->context_flag = 0;
	dev->last_context = 0;

	dev->buf_sigio = NULL;

	return 0;
}
static int drm_lastclose(struct drm_device *dev)
{
	drm_magic_entry_t *pt, *next;
	drm_local_map_t *map, *mapsave;
	int i;

	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	free(dev->unique, DRM_MEM_DRIVER);

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			free(pt, DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	drm_drawable_free_all(dev);

	/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp intact until
		 * drm_unload is called.
		 */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound)
				drm_agp_unbind_memory(entry->handle);
			drm_agp_free_memory(entry->handle);
			free(entry, DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
	if (dev->sg != NULL) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	TAILQ_FOREACH_MUTABLE(map, &dev->maplist, link, mapsave) {
		if (!(map->flags & _DRM_DRIVER))
			drm_rmmap(dev, map);
	}

	drm_dma_takedown(dev);
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
	}

	return 0;
}
static int drm_load(struct drm_device *dev)
{
	int i, retcode;

	TAILQ_INIT(&dev->maplist);

	drm_sysctl_init(dev);
	TAILQ_INIT(&dev->files);

	dev->types[0] = _DRM_STAT_LOCK;
	dev->types[1] = _DRM_STAT_OPENS;
	dev->types[2] = _DRM_STAT_CLOSES;
	dev->types[3] = _DRM_STAT_IOCTLS;
	dev->types[4] = _DRM_STAT_LOCKS;
	dev->types[5] = _DRM_STAT_UNLOCKS;

	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	if (dev->driver->load != NULL) {
		/* Shared code returns -errno. */
		retcode = -dev->driver->load(dev,
		    dev->id_entry->driver_private);
		pci_enable_busmaster(dev->device);
		if (retcode != 0)
			goto error;
	}

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init();
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
		    dev->agp == NULL) {
			DRM_ERROR("Card isn't AGP, or couldn't initialize "
			    "AGP.\n");
			retcode = ENOMEM;
			goto error;
		}
		if (dev->agp != NULL) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	retcode = drm_ctxbitmap_init(dev);
	if (retcode != 0) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto error;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    dev->driver->name,
	    dev->driver->major,
	    dev->driver->minor,
	    dev->driver->patchlevel,
	    dev->driver->date);

	return 0;

error:
	drm_sysctl_cleanup(dev);
	destroy_dev(dev->devnode);

	DRM_SPINUNINIT(&dev->drw_lock);
	DRM_SPINUNINIT(&dev->vbl_lock);
	DRM_SPINUNINIT(&dev->dev_lock);

	return retcode;
}
static void drm_unload(struct drm_device *dev)
{
	int i;

	drm_sysctl_cleanup(dev);
	destroy_dev(dev->devnode);

	drm_ctxbitmap_cleanup(dev);

	if (dev->agp && dev->agp->mtrr) {
		int __unused retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	drm_vblank_cleanup(dev);

	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
	 * worried about resource consumption while the DRM is inactive (between
	 * lastclose and firstopen or unload) because these aren't actually
	 * taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->device, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
	}

	free(dev->agp, DRM_MEM_AGPLISTS);

	if (dev->driver->unload != NULL) {
		dev->driver->unload(dev);
	}

	pci_disable_busmaster(dev->device);

	DRM_SPINUNINIT(&dev->drw_lock);
	DRM_SPINUNINIT(&dev->vbl_lock);
	DRM_SPINUNINIT(&dev->dev_lock);
}
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version *version = data;
	int len;

#define DRM_COPY( name, value )						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( DRM_COPY_TO_USER( name, value, len ) )		\
			return EFAULT;					\
	}

	version->version_major		= dev->driver->major;
	version->version_minor		= dev->driver->minor;
	version->version_patchlevel	= dev->driver->patchlevel;

	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}
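/*
 * How DRM_COPY behaves here (a summary; the name/name_len pairs come from the
 * user-supplied struct drm_version): the string copy is truncated to the
 * buffer length the caller passed in, while name##_len is rewritten to the
 * full strlen() of the driver string.  Userland can therefore detect a
 * too-small buffer, reallocate, and issue DRM_IOCTL_VERSION again.
 */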
int drm_open(struct dev_open_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	int flags = ap->a_oflags;
	int fmt = 0;
	struct thread *p = curthread;
	struct drm_device *dev = NULL;
	int retcode = 0;

	dev = DRIVER_SOFTC(minor(kdev));

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	retcode = drm_open_helper(kdev, flags, fmt, p, dev);

	if (!retcode) {
		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
		DRM_LOCK();
		device_busy(dev->device);
		if (!dev->open_count++)
			retcode = drm_firstopen(dev);
		DRM_UNLOCK();
	}

	return retcode;
}
int drm_close(struct dev_close_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	struct drm_file *file_priv;
	struct drm_device *dev;
	int retcode = 0;

	dev = DRIVER_SOFTC(minor(kdev));
	file_priv = drm_find_file_by_proc(dev, curthread);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK();

	if (--file_priv->refs != 0)
		goto done;

	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->device, dev->open_count);

	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
		    DRM_CURRENTPID,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		/* FIXME: may require heavy-handed reset of
		   hardware at this point, possibly
		   processed via a callback to the X
		   server. */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
				dev->lock.file_priv = file_priv;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break;	/* Got lock */
			}
			/* Contention */
			tsleep_interlock((void *)&dev->lock.lock_queue, PCATCH);
			DRM_UNLOCK();
			retcode = tsleep((void *)&dev->lock.lock_queue,
			    PCATCH | PINTERLOCKED, "drmlk2", 0);
			DRM_LOCK();
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
			drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_reclaim_buffers(dev, file_priv);

	funsetown(dev->buf_sigio);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	TAILQ_REMOVE(&dev->files, file_priv, link);
	free(file_priv, DRM_MEM_FILES);

	/* ========================================================
	 * End inline drm_release
	 */

done:
	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	device_unbusy(dev->device);
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK();

	return 0;
}
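/*
 * Rationale for the lock dance above: reclaim_buffers_locked() must run with
 * the hardware lock held.  If the closing process still held the lock, it is
 * reused and then freed; otherwise drm_close() takes the lock itself as
 * DRM_KERNEL_CONTEXT, sleeping in "drmlk2" on contention, before reclaiming
 * the buffers and releasing the lock again.
 */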
/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int drm_ioctl(struct dev_ioctl_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	caddr_t data = ap->a_data;
	struct thread *p = curthread;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	int retcode = 0;
	drm_ioctl_desc_t *ioctl;
	int (*func)(struct drm_device *dev, void *data,
	    struct drm_file *file_priv);
	int nr = DRM_IOCTL_NR(cmd);
	int is_driver_ioctl = 0;
	struct drm_file *file_priv;

	file_priv = drm_find_file_by_proc(dev, p);
	if (!file_priv) {
		DRM_ERROR("can't find authenticator\n");
		return EINVAL;
	}

	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
	++file_priv->ioctl_count;

	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, nr, (long)dev->device,
	    file_priv->authenticated);

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;

	case FIOSETOWN:
		return fsetown(*(int *)data, &dev->buf_sigio);

	case FIOGETOWN:
		*(int *) data = fgetown(dev->buf_sigio);
		return 0;
	}

	if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
		DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
		return EINVAL;
	}

	ioctl = &drm_ioctls[nr];
	/* It's not a core DRM ioctl, try driver-specific. */
	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
		nr -= DRM_COMMAND_BASE;
		if (nr > dev->driver->max_ioctl) {
			DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
			    nr, dev->driver->max_ioctl);
			return EINVAL;
		}
		ioctl = &dev->driver->ioctls[nr];
		is_driver_ioctl = 1;
	}

	func = ioctl->func;
	if (func == NULL) {
		DRM_DEBUG("no function\n");
		return EINVAL;
	}

	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !file_priv->master))
		return EACCES;

	if (is_driver_ioctl) {
		/* shared code returns -errno */
		retcode = -func(dev, data, file_priv);
	} else {
		retcode = func(dev, data, file_priv);
	}

	if (retcode != 0)
		DRM_DEBUG("    returning %d\n", retcode);

	return retcode;
}
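/*
 * Dispatch summary: DRM_IOCTL_NR(cmd) yields the 8-bit command number.
 * Numbers below DRM_COMMAND_BASE index the core drm_ioctls[] table directly;
 * numbers at or above it fall through to dev->driver->ioctls[] after the base
 * is subtracted, which is why driver tables are defined relative to
 * DRM_COMMAND_BASE.  Driver entry points follow the shared code's -errno
 * convention, hence the negation before returning.
 */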
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
	drm_local_map_t *map;

	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
			return map;
	}

	return NULL;
}
#ifdef DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN		0x6400
#define LINUX_IOCTL_DRM_MAX		0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_register_handler, &drm_handler);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_unregister_handler, &drm_handler);

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
	int error;
	int cmd = args->cmd;

	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return error;
}
#endif /* DRM_LINUX */