kernel: Remove some old major numbers.
sys/dev/drm/drm_drv.c
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */
/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */
#include <machine/limits.h>
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/drm_sarea.h"
#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = 1;
#else
int drm_debug_flag = 0;
#endif
static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);
#define DRIVER_SOFTC(unit) \
        ((struct drm_device *)devclass_get_softc(drm_devclass, unit))
MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
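
/*
 * Core ioctl table, indexed by DRM ioctl number.  The flags on each entry
 * restrict who may call it: DRM_AUTH requires an authenticated client,
 * DRM_MASTER requires the DRM master, and DRM_ROOT_ONLY requires superuser
 * privileges.  drm_ioctl() below enforces these checks before dispatching.
 */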
static drm_ioctl_desc_t drm_ioctls[256] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
static struct dev_ops drm_cdevsw = {
        { "drm", 0, D_TRACKCLOSE },
        .d_open =       drm_open,
        .d_close =      drm_close,
        .d_read =       drm_read,
        .d_ioctl =      drm_ioctl,
        .d_kqfilter =   drm_kqfilter,
        .d_mmap =       drm_mmap
};
static int drm_msi = 1;	/* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
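
/*
 * PCI IDs for which MSI should not be used.  drm_msi_is_blacklisted()
 * consults this list before the MSI setup in drm_attach().
 */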
static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
        {0x8086, 0x2772}, /* Intel i945G	*/ \
        {0x8086, 0x27A2}, /* Intel i945GM	*/ \
        {0x8086, 0x27AE}, /* Intel i945GME	*/ \
        {0, 0}
};
static int drm_msi_is_blacklisted(int vendor, int device)
{
        int i = 0;

        for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
                if ((drm_msi_blacklist[i].vendor == vendor) &&
                    (drm_msi_blacklist[i].device == device)) {
                        return 1;
                }
        }

        return 0;
}
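
/*
 * Match a probed PCI device against the driver's ID list.  Only VGA
 * display-class devices are considered; on a match the device description
 * is taken from the ID table entry.
 */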
int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
        drm_pci_id_list_t *id_entry;
        int vendor, device;

        vendor = pci_get_vendor(kdev);
        device = pci_get_device(kdev);

        if (pci_get_class(kdev) != PCIC_DISPLAY
            || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
                return ENXIO;

        id_entry = drm_find_description(vendor, device, idlist);
        if (id_entry != NULL) {
                if (!device_get_desc(kdev)) {
                        DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
                        device_set_desc(kdev, id_entry->name);
                }
                return 0;
        }

        return ENXIO;
}
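
/*
 * Attach the device: create the dri/card%d device node, record the PCI
 * location and IDs, allocate the interrupt resource for drivers that need
 * one, initialize the per-device locks, and finish with drm_load().
 */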
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
        struct drm_device *dev;
        drm_pci_id_list_t *id_entry;
        int unit;
#if 0
        int msicount;
#endif

        unit = device_get_unit(kdev);
        dev = device_get_softc(kdev);

        if (!strcmp(device_get_name(kdev), "drmsub"))
                dev->device = device_get_parent(kdev);
        else
                dev->device = kdev;

        dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
            DRM_DEV_MODE, "dri/card%d", unit);

        dev->pci_domain = 0;
        dev->pci_bus = pci_get_bus(dev->device);
        dev->pci_slot = pci_get_slot(dev->device);
        dev->pci_func = pci_get_function(dev->device);

        dev->pci_vendor = pci_get_vendor(dev->device);
        dev->pci_device = pci_get_device(dev->device);

        if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
                if (drm_msi &&
                    !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
#if 0
                        msicount = pci_msi_count(dev->device);
                        DRM_DEBUG("MSI count = %d\n", msicount);
                        if (msicount > 1)
                                msicount = 1;

                        if (pci_alloc_msi(dev->device, &msicount) == 0) {
                                DRM_INFO("MSI enabled %d message(s)\n",
                                    msicount);
                                dev->msi_enabled = 1;
                                dev->irqrid = 1;
                        }
#endif
                }

                dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
                    &dev->irqrid, RF_SHAREABLE);
                if (!dev->irqr) {
                        return ENOENT;
                }

                dev->irq = (int) rman_get_start(dev->irqr);
        }

        DRM_SPININIT(&dev->dev_lock, "drmdev");
        lwkt_serialize_init(&dev->irq_lock);
        DRM_SPININIT(&dev->vbl_lock, "drmvbl");
        DRM_SPININIT(&dev->drw_lock, "drmdrw");

        id_entry = drm_find_description(dev->pci_vendor,
            dev->pci_device, idlist);
        dev->id_entry = id_entry;

        return drm_load(dev);
}
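
/* Detach: tear everything down via drm_unload() and release the IRQ resource. */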
int drm_detach(device_t kdev)
{
        struct drm_device *dev;

        dev = device_get_softc(kdev);

        drm_unload(dev);

        if (dev->irqr) {
                bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
                    dev->irqr);

#if 0
                if (dev->msi_enabled) {
                        pci_release_msi(dev->device);
                        DRM_INFO("MSI released\n");
                }
#endif
        }

        return 0;
}
#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

devclass_t drm_devclass;
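
/*
 * Look up a vendor/device pair in a driver's PCI ID list; a device ID of
 * zero in the list acts as a wildcard for that vendor.
 */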
drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
        int i = 0;

        for (i = 0; idlist[i].vendor != 0; i++) {
                if ((idlist[i].vendor == vendor) &&
                    ((idlist[i].device == device) ||
                    (idlist[i].device == 0))) {
                        return &idlist[i];
                }
        }
        return NULL;
}
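
/*
 * Run on the first open of the device node: create the SAREA mapping, set
 * up DMA for drivers that use it, and reset the per-open bookkeeping.
 */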
static int drm_firstopen(struct drm_device *dev)
{
        drm_local_map_t *map;
        int i;

        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        /* prebuild the SAREA */
        i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
            _DRM_CONTAINS_LOCK, &map);
        if (i != 0)
                return i;

        if (dev->driver->firstopen)
                dev->driver->firstopen(dev);

        dev->buf_use = 0;

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
                i = drm_dma_setup(dev);
                if (i != 0)
                        return i;
        }

        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

        dev->lock.lock_queue = 0;
        dev->irq_enabled = 0;
        dev->context_flag = 0;
        dev->last_context = 0;
        dev->if_version = 0;

        dev->buf_sigio = NULL;

        DRM_DEBUG("\n");

        return 0;
}
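
/*
 * Run when the last reference to the device goes away: release the hardware
 * lock, free authentication magic entries, AGP and scatter/gather state,
 * and remove all maps not owned by the driver.
 */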
static int drm_lastclose(struct drm_device *dev)
{
        drm_magic_entry_t *pt, *next;
        drm_local_map_t *map, *mapsave;
        int i;

        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        DRM_DEBUG("\n");

        if (dev->driver->lastclose != NULL)
                dev->driver->lastclose(dev);

        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        if (dev->unique) {
                free(dev->unique, DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }
        /* Clear pid list */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                for (pt = dev->magiclist[i].head; pt; pt = next) {
                        next = pt->next;
                        free(pt, DRM_MEM_MAGIC);
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }

        DRM_UNLOCK();
        drm_drawable_free_all(dev);
        DRM_LOCK();

        /* Clear AGP information */
        if (dev->agp) {
                drm_agp_mem_t *entry;
                drm_agp_mem_t *nexte;

                /* Remove AGP resources, but leave dev->agp intact until
                 * drm_unload is called.
                 */
                for (entry = dev->agp->memory; entry; entry = nexte) {
                        nexte = entry->next;
                        if (entry->bound)
                                drm_agp_unbind_memory(entry->handle);
                        drm_agp_free_memory(entry->handle);
                        free(entry, DRM_MEM_AGPLISTS);
                }
                dev->agp->memory = NULL;

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled  = 0;
        }
        if (dev->sg != NULL) {
                drm_sg_cleanup(dev->sg);
                dev->sg = NULL;
        }

        TAILQ_FOREACH_MUTABLE(map, &dev->maplist, link, mapsave) {
                if (!(map->flags & _DRM_DRIVER))
                        drm_rmmap(dev, map);
        }

        drm_dma_takedown(dev);
        if (dev->lock.hw_lock) {
                dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.file_priv = NULL;
                DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
        }

        return 0;
}
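
/*
 * One-time setup at attach: initialize the statistics counters, call the
 * driver's load hook, bring up AGP and the context bitmap, then announce
 * the driver name and version.
 */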
static int drm_load(struct drm_device *dev)
{
        int i, retcode;

        DRM_DEBUG("\n");

        TAILQ_INIT(&dev->maplist);

        drm_mem_init();
        drm_sysctl_init(dev);
        TAILQ_INIT(&dev->files);

        dev->counters  = 6;
        dev->types[0]  = _DRM_STAT_LOCK;
        dev->types[1]  = _DRM_STAT_OPENS;
        dev->types[2]  = _DRM_STAT_CLOSES;
        dev->types[3]  = _DRM_STAT_IOCTLS;
        dev->types[4]  = _DRM_STAT_LOCKS;
        dev->types[5]  = _DRM_STAT_UNLOCKS;

        for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

        if (dev->driver->load != NULL) {
                DRM_LOCK();
                /* Shared code returns -errno. */
                retcode = -dev->driver->load(dev,
                    dev->id_entry->driver_private);
                pci_enable_busmaster(dev->device);
                DRM_UNLOCK();
                if (retcode != 0)
                        goto error;
        }

        if (drm_core_has_AGP(dev)) {
                if (drm_device_is_agp(dev))
                        dev->agp = drm_agp_init();
                if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
                    dev->agp == NULL) {
                        DRM_ERROR("Card isn't AGP, or couldn't initialize "
                            "AGP.\n");
                        retcode = ENOMEM;
                        goto error;
                }
                if (dev->agp != NULL) {
                        if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
                            dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
                                dev->agp->mtrr = 1;
                }
        }

        retcode = drm_ctxbitmap_init(dev);
        if (retcode != 0) {
                DRM_ERROR("Cannot allocate memory for context bitmap.\n");
                goto error;
        }

        DRM_INFO("Initialized %s %d.%d.%d %s\n",
            dev->driver->name,
            dev->driver->major,
            dev->driver->minor,
            dev->driver->patchlevel,
            dev->driver->date);

        return 0;

error:
        drm_sysctl_cleanup(dev);
        DRM_LOCK();
        drm_lastclose(dev);
        DRM_UNLOCK();
        destroy_dev(dev->devnode);

        DRM_SPINUNINIT(&dev->drw_lock);
        DRM_SPINUNINIT(&dev->vbl_lock);
        DRM_SPINUNINIT(&dev->dev_lock);

        return retcode;
}
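
/*
 * Undo drm_load(): destroy the device node and sysctl tree, release AGP,
 * MTRR, vblank and PCI resources, and call the driver's unload hook.
 */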
static void drm_unload(struct drm_device *dev)
{
        int i;

        DRM_DEBUG("\n");

        drm_sysctl_cleanup(dev);
        destroy_dev(dev->devnode);

        drm_ctxbitmap_cleanup(dev);

        if (dev->agp && dev->agp->mtrr) {
                int __unused retcode;

                retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
                    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
                DRM_DEBUG("mtrr_del = %d", retcode);
        }

        drm_vblank_cleanup(dev);

        DRM_LOCK();
        drm_lastclose(dev);
        DRM_UNLOCK();

        /* Clean up PCI resources allocated by drm_bufs.c.  We're not really
         * worried about resource consumption while the DRM is inactive (between
         * lastclose and firstopen or unload) because these aren't actually
         * taking up KVA, just keeping the PCI resource allocated.
         */
        for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
                if (dev->pcir[i] == NULL)
                        continue;
                bus_release_resource(dev->device, SYS_RES_MEMORY,
                    dev->pcirid[i], dev->pcir[i]);
                dev->pcir[i] = NULL;
        }

        if (dev->agp) {
                free(dev->agp, DRM_MEM_AGPLISTS);
                dev->agp = NULL;
        }

        if (dev->driver->unload != NULL) {
                DRM_LOCK();
                dev->driver->unload(dev);
                DRM_UNLOCK();
        }

        drm_mem_uninit();

        pci_disable_busmaster(dev->device);

        DRM_SPINUNINIT(&dev->drw_lock);
        DRM_SPINUNINIT(&dev->vbl_lock);
        DRM_SPINUNINIT(&dev->dev_lock);
}
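
/*
 * DRM_IOCTL_VERSION handler: copy the driver's version numbers and its
 * name/date/description strings out to the caller.
 */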
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_version *version = data;
        int len;

#define DRM_COPY( name, value )                                         \
        len = strlen( value );                                          \
        if ( len > name##_len ) len = name##_len;                       \
        name##_len = strlen( value );                                   \
        if ( len && name ) {                                            \
                if ( DRM_COPY_TO_USER( name, value, len ) )             \
                        return EFAULT;                                  \
        }

        version->version_major      = dev->driver->major;
        version->version_minor      = dev->driver->minor;
        version->version_patchlevel = dev->driver->patchlevel;

        DRM_COPY(version->name, dev->driver->name);
        DRM_COPY(version->date, dev->driver->date);
        DRM_COPY(version->desc, dev->driver->desc);

        return 0;
}
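
/*
 * Character-device open entry point: set up per-file state through
 * drm_open_helper() and run drm_firstopen() on the first concurrent open.
 */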
int drm_open(struct dev_open_args *ap)
{
        struct cdev *kdev = ap->a_head.a_dev;
        int flags = ap->a_oflags;
        int fmt = 0;
        struct thread *p = curthread;
        struct drm_device *dev = NULL;
        int retcode = 0;

        dev = DRIVER_SOFTC(minor(kdev));

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        retcode = drm_open_helper(kdev, flags, fmt, p, dev);

        if (!retcode) {
                atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
                DRM_LOCK();
                device_busy(dev->device);
                if (!dev->open_count++)
                        retcode = drm_firstopen(dev);
                DRM_UNLOCK();
        }

        return retcode;
}
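
/*
 * Character-device close entry point: drop a reference on the per-file
 * state and, when it reaches zero, do the inline drm_release work (reclaim
 * buffers, release the hardware lock, run the driver's close hooks).
 * drm_lastclose() runs once the overall open count drops to zero.
 */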
int drm_close(struct dev_close_args *ap)
{
        struct cdev *kdev = ap->a_head.a_dev;
        struct drm_file *file_priv;
        struct drm_device *dev;
        int retcode = 0;

        dev = DRIVER_SOFTC(minor(kdev));
        file_priv = drm_find_file_by_proc(dev, curthread);

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        DRM_LOCK();

        if (--file_priv->refs != 0)
                goto done;

        if (dev->driver->preclose != NULL)
                dev->driver->preclose(dev, file_priv);

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
            DRM_CURRENTPID, (long)dev->device, dev->open_count);

        if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
            && dev->lock.file_priv == file_priv) {
                DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
                    DRM_CURRENTPID,
                    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
                if (dev->driver->reclaim_buffers_locked != NULL)
                        dev->driver->reclaim_buffers_locked(dev, file_priv);

                drm_lock_free(&dev->lock,
                    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                /* FIXME: may require heavy-handed reset of
                   hardware at this point, possibly
                   processed via a callback to the X
                   server. */
        } else if (dev->driver->reclaim_buffers_locked != NULL &&
            dev->lock.hw_lock != NULL) {
                /* The lock is required to reclaim buffers */
                for (;;) {
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                retcode = EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
                                dev->lock.file_priv = file_priv;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                                break;	/* Got lock */
                        }
                        /* Contention */
                        tsleep_interlock((void *)&dev->lock.lock_queue, PCATCH);
                        DRM_UNLOCK();
                        retcode = tsleep((void *)&dev->lock.lock_queue,
                            PCATCH | PINTERLOCKED, "drmlk2", 0);
                        DRM_LOCK();
                        if (retcode)
                                break;
                }
                if (retcode == 0) {
                        dev->driver->reclaim_buffers_locked(dev, file_priv);
                        drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
                }
        }

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !dev->driver->reclaim_buffers_locked)
                drm_reclaim_buffers(dev, file_priv);

        funsetown(&dev->buf_sigio);

        if (dev->driver->postclose != NULL)
                dev->driver->postclose(dev, file_priv);
        TAILQ_REMOVE(&dev->files, file_priv, link);
        free(file_priv, DRM_MEM_FILES);

        /* ========================================================
         * End inline drm_release
         */
done:
        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        device_unbusy(dev->device);
        if (--dev->open_count == 0) {
                retcode = drm_lastclose(dev);
        }

        DRM_UNLOCK();

        return (0);
}
/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int drm_ioctl(struct dev_ioctl_args *ap)
{
        struct cdev *kdev = ap->a_head.a_dev;
        u_long cmd = ap->a_cmd;
        caddr_t data = ap->a_data;
        struct thread *p = curthread;
        struct drm_device *dev = drm_get_device_from_kdev(kdev);
        int retcode = 0;
        drm_ioctl_desc_t *ioctl;
        int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
        int nr = DRM_IOCTL_NR(cmd);
        int is_driver_ioctl = 0;
        struct drm_file *file_priv;

        file_priv = drm_find_file_by_proc(dev, p);
        if (!file_priv) {
                DRM_ERROR("can't find authenticator\n");
                return EINVAL;
        }

        atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
        ++file_priv->ioctl_count;

        DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
            DRM_CURRENTPID, cmd, nr, (long)dev->device,
            file_priv->authenticated);

        switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
                return 0;

        case FIOSETOWN:
                return fsetown(*(int *)data, &dev->buf_sigio);

        case FIOGETOWN:
                *(int *) data = fgetown(&dev->buf_sigio);
                return 0;
        }

        if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
                DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
                return EINVAL;
        }

        ioctl = &drm_ioctls[nr];
        /* It's not a core DRM ioctl, try driver-specific. */
        if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
                /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
                nr -= DRM_COMMAND_BASE;
                if (nr > dev->driver->max_ioctl) {
                        DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
                            nr, dev->driver->max_ioctl);
                        return EINVAL;
                }
                ioctl = &dev->driver->ioctls[nr];
                is_driver_ioctl = 1;
        }
        func = ioctl->func;

        if (func == NULL) {
                DRM_DEBUG("no function\n");
                return EINVAL;
        }

        if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
            ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
            ((ioctl->flags & DRM_MASTER) && !file_priv->master))
                return EACCES;

        if (is_driver_ioctl) {
                DRM_LOCK();
                /* shared code returns -errno */
                retcode = -func(dev, data, file_priv);
                DRM_UNLOCK();
        } else {
                retcode = func(dev, data, file_priv);
        }

        if (retcode != 0)
                DRM_DEBUG("    returning %d\n", retcode);

        return retcode;
}
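
/* Find the SAREA map: the shared-memory map that contains the hardware lock. */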
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
        drm_local_map_t *map;

        DRM_SPINLOCK_ASSERT(&dev->dev_lock);
        TAILQ_FOREACH(map, &dev->maplist, link) {
                if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
                        return map;
        }

        return NULL;
}
#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN		0x6400
#define LINUX_IOCTL_DRM_MAX		0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_register_handler, &drm_handler);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_unregister_handler, &drm_handler);

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN
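
/*
 * Translate a Linux ioctl request to the native encoding (the IN/OUT
 * direction bits are swapped) and forward it to the regular ioctl() path.
 */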
static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
        int error;
        int cmd = args->cmd;

        args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
        if (cmd & LINUX_IOC_IN)
                args->cmd |= IOC_IN;
        if (cmd & LINUX_IOC_OUT)
                args->cmd |= IOC_OUT;

        error = ioctl(p, (struct ioctl_args *)args);

        return error;
}
#endif /* DRM_LINUX */