sys/dev/drm/drm_drv.c
1 /*
2 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
4 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5 * All Rights Reserved.
7 * Author Rickard E. (Rik) Faith <faith@valinux.com>
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <drm/drmP.h>
32 #include <drm/drm_core.h>
33 #include "drm_legacy.h"
34 #include "drm_internal.h"
36 /* Provides three levels of debug: off, minimal, verbose */
37 #ifdef __DragonFly__
38 #if DRM_DEBUG_DEFAULT_ON == 1
39 #define DRM_DEBUGBITS_ON (DRM_UT_CORE | DRM_UT_DRIVER | DRM_UT_KMS | \
40 DRM_UT_PRIME | DRM_UT_ATOMIC | DRM_UT_FIOCTL)
41 #elif DRM_DEBUG_DEFAULT_ON == 2
42 #define DRM_DEBUGBITS_ON (DRM_UT_CORE | DRM_UT_DRIVER | DRM_UT_KMS | \
43 DRM_UT_PRIME | DRM_UT_ATOMIC | DRM_UT_FIOCTL | \
44 DRM_UT_PID | DRM_UT_IOCTL | DRM_UT_VBLANK)
45 #else
46 #define DRM_DEBUGBITS_ON (0x0)
47 #endif
48 unsigned int drm_debug = DRM_DEBUGBITS_ON; /* defaults to 0 */
49 #else
50 unsigned int drm_debug = 0; /* bitmask of DRM_UT_x */
51 #endif /* __DragonFly__ */
52 EXPORT_SYMBOL(drm_debug);
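/*
 * Editor's note, a usage sketch that is not part of the original file: the
 * drm_debug mask above can be changed without rebuilding.  It is fetched
 * from the "drm.debug" loader tunable in drm_modevent() and exported as the
 * hw.drm.debug sysctl further down in this file; the value is a bitmask of
 * the DRM_UT_* flags defined in drmP.h.
 *
 *	# /boot/loader.conf (read via TUNABLE_INT_FETCH("drm.debug", ...))
 *	drm.debug="1"
 *
 *	# at runtime (SYSCTL_INT hw.drm.debug below)
 *	sysctl hw.drm.debug=1
 */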
54 MODULE_AUTHOR(CORE_AUTHOR);
55 MODULE_DESCRIPTION(CORE_DESC);
56 MODULE_PARM_DESC(debug, "Enable debug output");
57 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
58 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
59 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
61 module_param_named(debug, drm_debug, int, 0600);
63 #if 0
64 static DEFINE_SPINLOCK(drm_minor_lock);
65 static struct idr drm_minors_idr;
66 #endif
68 #if 0
69 static struct dentry *drm_debugfs_root;
70 #endif
72 void drm_err(const char *func, const char *format, ...)
74 __va_list args;
76 kprintf("error: [" DRM_NAME ":pid%d:%s] *ERROR* ", DRM_CURRENTPID, func);
78 __va_start(args, format);
79 kvprintf(format, args);
80 __va_end(args);
82 EXPORT_SYMBOL(drm_err);
84 void drm_ut_debug_printk(const char *function_name, const char *format, ...)
86 __va_list args;
88 if (unlikely(drm_debug & DRM_UT_PID)) {
89 kprintf("[" DRM_NAME ":pid%d:%s] ",
90 DRM_CURRENTPID, function_name);
91 } else {
92 kprintf("[" DRM_NAME ":%s] ", function_name);
95 __va_start(args, format);
96 kvprintf(format, args);
97 __va_end(args);
99 EXPORT_SYMBOL(drm_ut_debug_printk);
101 #if 0
102 struct drm_master *drm_master_create(struct drm_minor *minor)
104 struct drm_master *master;
106 master = kzalloc(sizeof(*master), GFP_KERNEL);
107 if (!master)
108 return NULL;
110 kref_init(&master->refcount);
111 spin_lock_init(&master->lock.spinlock);
112 init_waitqueue_head(&master->lock.lock_queue);
113 if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
114 kfree(master);
115 return NULL;
117 master->minor = minor;
119 return master;
122 struct drm_master *drm_master_get(struct drm_master *master)
124 kref_get(&master->refcount);
125 return master;
127 EXPORT_SYMBOL(drm_master_get);
129 static void drm_master_destroy(struct kref *kref)
131 struct drm_master *master = container_of(kref, struct drm_master, refcount);
132 struct drm_device *dev = master->minor->dev;
133 struct drm_map_list *r_list, *list_temp;
135 mutex_lock(&dev->struct_mutex);
136 if (dev->driver->master_destroy)
137 dev->driver->master_destroy(dev, master);
139 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
140 if (r_list->master == master) {
141 drm_legacy_rmmap_locked(dev, r_list->map);
142 r_list = NULL;
146 if (master->unique) {
147 kfree(master->unique);
148 master->unique = NULL;
149 master->unique_len = 0;
152 drm_ht_remove(&master->magiclist);
154 mutex_unlock(&dev->struct_mutex);
155 kfree(master);
158 void drm_master_put(struct drm_master **master)
160 kref_put(&(*master)->refcount, drm_master_destroy);
161 *master = NULL;
163 EXPORT_SYMBOL(drm_master_put);
164 #endif
166 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
167 struct drm_file *file_priv)
169 DRM_DEBUG("setmaster\n");
171 if (file_priv->master != 0)
172 return (0);
174 return (-EPERM);
177 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
178 struct drm_file *file_priv)
180 DRM_DEBUG("dropmaster\n");
181 if (file_priv->master != 0)
182 return -EINVAL;
183 return 0;
186 #if 0
188 * DRM Minors
189 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
190 * of them is represented by a drm_minor object. Depending on the capabilities
191 * of the device-driver, different interfaces are registered.
193 * Minors can be accessed via dev->$minor_name. This pointer is either
194 * NULL or a valid drm_minor pointer and stays valid as long as the device is
195 * valid. This means, DRM minors have the same life-time as the underlying
196 * device. However, this doesn't mean that the minor is active. Minors are
197 * registered and unregistered dynamically according to device-state.
200 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
201 unsigned int type)
203 switch (type) {
204 case DRM_MINOR_LEGACY:
205 return &dev->primary;
206 case DRM_MINOR_RENDER:
207 return &dev->render;
208 case DRM_MINOR_CONTROL:
209 return &dev->control;
210 default:
211 return NULL;
215 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
217 struct drm_minor *minor;
218 unsigned long flags;
219 int r;
221 minor = kzalloc(sizeof(*minor), GFP_KERNEL);
222 if (!minor)
223 return -ENOMEM;
225 minor->type = type;
226 minor->dev = dev;
228 idr_preload(GFP_KERNEL);
229 spin_lock_irqsave(&drm_minor_lock, flags);
230 r = idr_alloc(&drm_minors_idr,
231 NULL,
232 64 * type,
233 64 * (type + 1),
234 GFP_NOWAIT);
235 spin_unlock_irqrestore(&drm_minor_lock, flags);
236 idr_preload_end();
238 if (r < 0)
239 goto err_free;
241 minor->index = r;
243 minor->kdev = drm_sysfs_minor_alloc(minor);
244 if (IS_ERR(minor->kdev)) {
245 r = PTR_ERR(minor->kdev);
246 goto err_index;
249 *drm_minor_get_slot(dev, type) = minor;
250 return 0;
252 err_index:
253 spin_lock_irqsave(&drm_minor_lock, flags);
254 idr_remove(&drm_minors_idr, minor->index);
255 spin_unlock_irqrestore(&drm_minor_lock, flags);
256 err_free:
257 kfree(minor);
258 return r;
261 static void drm_minor_free(struct drm_device *dev, unsigned int type)
263 struct drm_minor **slot, *minor;
264 unsigned long flags;
266 slot = drm_minor_get_slot(dev, type);
267 minor = *slot;
268 if (!minor)
269 return;
271 put_device(minor->kdev);
273 spin_lock_irqsave(&drm_minor_lock, flags);
274 idr_remove(&drm_minors_idr, minor->index);
275 spin_unlock_irqrestore(&drm_minor_lock, flags);
277 kfree(minor);
278 *slot = NULL;
281 static int drm_minor_register(struct drm_device *dev, unsigned int type)
283 struct drm_minor *minor;
284 unsigned long flags;
285 int ret;
287 DRM_DEBUG("\n");
289 minor = *drm_minor_get_slot(dev, type);
290 if (!minor)
291 return 0;
293 ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
294 if (ret) {
295 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
296 return ret;
299 ret = device_add(minor->kdev);
300 if (ret)
301 goto err_debugfs;
303 /* replace NULL with @minor so lookups will succeed from now on */
304 spin_lock_irqsave(&drm_minor_lock, flags);
305 idr_replace(&drm_minors_idr, minor, minor->index);
306 spin_unlock_irqrestore(&drm_minor_lock, flags);
308 DRM_DEBUG("new minor registered %d\n", minor->index);
309 return 0;
311 err_debugfs:
312 drm_debugfs_cleanup(minor);
313 return ret;
316 static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
318 struct drm_minor *minor;
319 unsigned long flags;
321 minor = *drm_minor_get_slot(dev, type);
322 if (!minor || !device_is_registered(minor->kdev))
323 return;
325 /* replace @minor with NULL so lookups will fail from now on */
326 spin_lock_irqsave(&drm_minor_lock, flags);
327 idr_replace(&drm_minors_idr, NULL, minor->index);
328 spin_unlock_irqrestore(&drm_minor_lock, flags);
330 device_del(minor->kdev);
331 dev_set_drvdata(minor->kdev, NULL); /* safety belt */
332 drm_debugfs_cleanup(minor);
336 * drm_minor_acquire - Acquire a DRM minor
337 * @minor_id: Minor ID of the DRM-minor
339 * Looks up the given minor-ID and returns the respective DRM-minor object. The
340 * reference-count of the underlying device is increased so you must release this
341 * object with drm_minor_release().
343 * As long as you hold this minor, it is guaranteed that the object and the
344 * minor->dev pointer will stay valid! However, the device may get unplugged and
345 * unregistered while you hold the minor.
347 * Returns:
348 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
349 * failure.
351 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
353 struct drm_minor *minor;
354 unsigned long flags;
356 spin_lock_irqsave(&drm_minor_lock, flags);
357 minor = idr_find(&drm_minors_idr, minor_id);
358 if (minor)
359 drm_dev_ref(minor->dev);
360 spin_unlock_irqrestore(&drm_minor_lock, flags);
362 if (!minor) {
363 return ERR_PTR(-ENODEV);
364 } else if (drm_device_is_unplugged(minor->dev)) {
365 drm_dev_unref(minor->dev);
366 return ERR_PTR(-ENODEV);
369 return minor;
373 * drm_minor_release - Release DRM minor
374 * @minor: Pointer to DRM minor object
376 * Release a minor that was previously acquired via drm_minor_acquire().
378 void drm_minor_release(struct drm_minor *minor)
380 drm_dev_unref(minor->dev);
384 * DOC: driver instance overview
386 * A device instance for a drm driver is represented by struct &drm_device. This
387 * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
388 * callbacks implemented by the driver. The driver then needs to initialize all
389 * the various subsystems for the drm device like memory management, vblank
390 * handling, modesetting support and initial output configuration plus obviously
391 * initialize all the corresponding hardware bits. An important part of this is
392 * also calling drm_dev_set_unique() to set the userspace-visible unique name of
393 * this device instance. Finally when everything is up and running and ready for
394 * userspace the device instance can be published using drm_dev_register().
396 * There is also deprecated support for initializing device instances using
397 * bus-specific helpers and the ->load() callback. But due to
398 * backwards-compatibility needs the device instance has to be published too
399 * early, which requires unpretty global locking to make safe and is therefore
400 * only supported for existing drivers not yet converted to the new scheme.
402 * When cleaning up a device instance everything needs to be done in reverse:
403 * First unpublish the device instance with drm_dev_unregister(). Then clean up
404 * any other resources allocated at device initialization and drop the driver's
405 * reference to &drm_device using drm_dev_unref().
407 * Note that the lifetime rules for a &drm_device instance still have a lot of
408 * historical baggage. Hence use the reference counting provided by
409 * drm_dev_ref() and drm_dev_unref() only carefully.
411 * Also note that embedding of &drm_device is currently not (yet) supported (but
412 * it would be easy to add). Drivers can store driver-private data in the
413 * dev_priv field of &drm_device.
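/*
 * Editor's note: a minimal sketch of the lifecycle described above, not part
 * of the original file.  foo_driver, foo_probe and foo_remove are
 * hypothetical names and error handling is abbreviated.
 *
 *	static int foo_probe(struct device *parent)
 *	{
 *		struct drm_device *ddev;
 *		int ret;
 *
 *		ddev = drm_dev_alloc(&foo_driver, parent);
 *		if (!ddev)
 *			return -ENOMEM;
 *
 *		... set up hardware, memory management, modesetting ...
 *
 *		ret = drm_dev_register(ddev, 0);
 *		if (ret)
 *			drm_dev_unref(ddev);
 *		return ret;
 *	}
 *
 *	static void foo_remove(struct drm_device *ddev)
 *	{
 *		drm_dev_unregister(ddev);
 *		... release driver-private resources ...
 *		drm_dev_unref(ddev);
 *	}
 */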
417 * drm_put_dev - Unregister and release a DRM device
418 * @dev: DRM device
420 * Called at module unload time or when a PCI device is unplugged.
422 * Cleans up the DRM device, calling drm_lastclose().
424 * Note: Use of this function is deprecated. It will eventually go away
425 * completely. Please use drm_dev_unregister() and drm_dev_unref() explicitly
426 * instead to make sure that the device isn't userspace accessible any more
427 * while teardown is in progress, ensuring that userspace can't access an
428 * inconsistent state.
430 void drm_put_dev(struct drm_device *dev)
432 DRM_DEBUG("\n");
434 if (!dev) {
435 DRM_ERROR("cleanup called no dev\n");
436 return;
439 drm_dev_unregister(dev);
440 drm_dev_unref(dev);
442 EXPORT_SYMBOL(drm_put_dev);
444 void drm_unplug_dev(struct drm_device *dev)
446 /* for a USB device */
447 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
448 drm_minor_unregister(dev, DRM_MINOR_RENDER);
449 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
451 mutex_lock(&drm_global_mutex);
453 drm_device_set_unplugged(dev);
455 if (dev->open_count == 0) {
456 drm_put_dev(dev);
458 mutex_unlock(&drm_global_mutex);
460 EXPORT_SYMBOL(drm_unplug_dev);
463 * DRM internal mount
464 * We want to be able to allocate our own "struct address_space" to control
465 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
466 * stand-alone address_space objects, so we need an underlying inode. As there
467 * is no way to allocate an independent inode easily, we need a fake internal
468 * VFS mount-point.
470 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
471 * frees it again. You are allowed to use iget() and iput() to get references to
472 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
473 * drm_fs_inode_free() call (which does not have to be the last iput()).
474 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
475 * between multiple inode-users. You could, technically, call
476 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
477 * iput(), but this way you'd end up with a new vfsmount for each inode.
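/*
 * Editor's note (illustration, not part of the original file): the pairing
 * rule above is exactly what drm_dev_alloc() and drm_dev_release() below
 * implement:
 *
 *	dev->anon_inode = drm_fs_inode_new();		(in drm_dev_alloc)
 *	...
 *	drm_fs_inode_free(dev->anon_inode);		(in drm_dev_release)
 */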
480 static int drm_fs_cnt;
481 static struct vfsmount *drm_fs_mnt;
483 static const struct dentry_operations drm_fs_dops = {
484 .d_dname = simple_dname,
487 static const struct super_operations drm_fs_sops = {
488 .statfs = simple_statfs,
491 static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
492 const char *dev_name, void *data)
494 return mount_pseudo(fs_type,
495 "drm:",
496 &drm_fs_sops,
497 &drm_fs_dops,
498 0x010203ff);
501 static struct file_system_type drm_fs_type = {
502 .name = "drm",
503 .owner = THIS_MODULE,
504 .mount = drm_fs_mount,
505 .kill_sb = kill_anon_super,
508 static struct inode *drm_fs_inode_new(void)
510 struct inode *inode;
511 int r;
513 r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
514 if (r < 0) {
515 DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
516 return ERR_PTR(r);
519 inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
520 if (IS_ERR(inode))
521 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
523 return inode;
526 static void drm_fs_inode_free(struct inode *inode)
528 if (inode) {
529 iput(inode);
530 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
535 * drm_dev_alloc - Allocate new DRM device
536 * @driver: DRM driver to allocate device for
537 * @parent: Parent device object
539 * Allocate and initialize a new DRM device. No device registration is done.
540 * Call drm_dev_register() to advertise the device to user space and register it
541 * with other core subsystems. This should be done last in the device
542 * initialization sequence to make sure userspace can't access an inconsistent
543 * state.
545 * The initial ref-count of the object is 1. Use drm_dev_ref() and
546 * drm_dev_unref() to take and drop further ref-counts.
548 * Note that for purely virtual devices @parent can be NULL.
550 * RETURNS:
551 * Pointer to new DRM device, or NULL if out of memory.
553 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
554 struct device *parent)
556 struct drm_device *dev;
557 int ret;
559 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
560 if (!dev)
561 return NULL;
563 kref_init(&dev->ref);
564 dev->dev = parent;
565 dev->driver = driver;
567 INIT_LIST_HEAD(&dev->filelist);
568 INIT_LIST_HEAD(&dev->ctxlist);
569 INIT_LIST_HEAD(&dev->vmalist);
570 INIT_LIST_HEAD(&dev->maplist);
571 INIT_LIST_HEAD(&dev->vblank_event_list);
573 spin_lock_init(&dev->buf_lock);
574 spin_lock_init(&dev->event_lock);
575 mutex_init(&dev->struct_mutex);
576 mutex_init(&dev->ctxlist_mutex);
577 mutex_init(&dev->master_mutex);
579 dev->anon_inode = drm_fs_inode_new();
580 if (IS_ERR(dev->anon_inode)) {
581 ret = PTR_ERR(dev->anon_inode);
582 DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
583 goto err_free;
586 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
587 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
588 if (ret)
589 goto err_minors;
591 WARN_ON(driver->suspend || driver->resume);
594 if (drm_core_check_feature(dev, DRIVER_RENDER)) {
595 ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
596 if (ret)
597 goto err_minors;
600 ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
601 if (ret)
602 goto err_minors;
604 if (drm_ht_create(&dev->map_hash, 12))
605 goto err_minors;
607 drm_legacy_ctxbitmap_init(dev);
609 if (drm_core_check_feature(dev, DRIVER_GEM)) {
610 ret = drm_gem_init(dev);
611 if (ret) {
612 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
613 goto err_ctxbitmap;
617 return dev;
619 err_ctxbitmap:
620 drm_legacy_ctxbitmap_cleanup(dev);
621 drm_ht_remove(&dev->map_hash);
622 err_minors:
623 drm_minor_free(dev, DRM_MINOR_LEGACY);
624 drm_minor_free(dev, DRM_MINOR_RENDER);
625 drm_minor_free(dev, DRM_MINOR_CONTROL);
626 drm_fs_inode_free(dev->anon_inode);
627 err_free:
628 mutex_destroy(&dev->master_mutex);
629 kfree(dev);
630 return NULL;
632 EXPORT_SYMBOL(drm_dev_alloc);
634 static void drm_dev_release(struct kref *ref)
636 struct drm_device *dev = container_of(ref, struct drm_device, ref);
638 if (drm_core_check_feature(dev, DRIVER_GEM))
639 drm_gem_destroy(dev);
641 drm_legacy_ctxbitmap_cleanup(dev);
642 drm_ht_remove(&dev->map_hash);
643 drm_fs_inode_free(dev->anon_inode);
645 drm_minor_free(dev, DRM_MINOR_LEGACY);
646 drm_minor_free(dev, DRM_MINOR_RENDER);
647 drm_minor_free(dev, DRM_MINOR_CONTROL);
649 mutex_destroy(&dev->master_mutex);
650 kfree(dev->unique);
651 kfree(dev);
655 * drm_dev_ref - Take reference of a DRM device
656 * @dev: device to take reference of or NULL
658 * This increases the ref-count of @dev by one. You *must* already own a
659 * reference when calling this. Use drm_dev_unref() to drop this reference
660 * again.
662 * This function never fails. However, this function does not provide *any*
663 * guarantee whether the device is alive or running. It only provides a
664 * reference to the object and the memory associated with it.
666 void drm_dev_ref(struct drm_device *dev)
668 if (dev)
669 kref_get(&dev->ref);
671 EXPORT_SYMBOL(drm_dev_ref);
674 * drm_dev_unref - Drop reference of a DRM device
675 * @dev: device to drop reference of or NULL
677 * This decreases the ref-count of @dev by one. The device is destroyed if the
678 * ref-count drops to zero.
680 void drm_dev_unref(struct drm_device *dev)
682 if (dev)
683 kref_put(&dev->ref, drm_dev_release);
685 EXPORT_SYMBOL(drm_dev_unref);
688 * drm_dev_register - Register DRM device
689 * @dev: Device to register
690 * @flags: Flags passed to the driver's .load() function
692 * Register the DRM device @dev with the system, advertise device to user-space
693 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
694 * previously.
696 * Never call this twice on any device!
698 * NOTE: To ensure backward compatibility with existing drivers, this
699 * function calls the ->load() method after registering the device nodes,
700 * creating race conditions. Usage of the ->load() methods is therefore
701 * deprecated, drivers must perform all initialization before calling
702 * drm_dev_register().
704 * RETURNS:
705 * 0 on success, negative error code on failure.
707 int drm_dev_register(struct drm_device *dev, unsigned long flags)
709 int ret;
711 mutex_lock(&drm_global_mutex);
713 ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
714 if (ret)
715 goto err_minors;
717 ret = drm_minor_register(dev, DRM_MINOR_RENDER);
718 if (ret)
719 goto err_minors;
721 ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
722 if (ret)
723 goto err_minors;
725 if (dev->driver->load) {
726 ret = dev->driver->load(dev, flags);
727 if (ret)
728 goto err_minors;
731 ret = 0;
732 goto out_unlock;
734 err_minors:
735 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
736 drm_minor_unregister(dev, DRM_MINOR_RENDER);
737 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
738 out_unlock:
739 mutex_unlock(&drm_global_mutex);
740 return ret;
742 EXPORT_SYMBOL(drm_dev_register);
745 * drm_dev_unregister - Unregister DRM device
746 * @dev: Device to unregister
748 * Unregister the DRM device from the system. This does the reverse of
749 * drm_dev_register() but does not deallocate the device. The caller must call
750 * drm_dev_unref() to drop their final reference.
752 * This should be called first in the device teardown code to make sure
753 * userspace can't access the device instance any more.
755 void drm_dev_unregister(struct drm_device *dev)
757 struct drm_map_list *r_list, *list_temp;
759 drm_lastclose(dev);
761 if (dev->driver->unload)
762 dev->driver->unload(dev);
764 if (dev->agp)
765 drm_pci_agp_destroy(dev);
767 drm_vblank_cleanup(dev);
769 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
770 drm_legacy_rmmap(dev, r_list->map);
772 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
773 drm_minor_unregister(dev, DRM_MINOR_RENDER);
774 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
776 EXPORT_SYMBOL(drm_dev_unregister);
779 * drm_dev_set_unique - Set the unique name of a DRM device
780 * @dev: device of which to set the unique name
781 * @fmt: format string for unique name
783 * Sets the unique name of a DRM device using the specified format string and
784 * a variable list of arguments. Drivers can use this at driver probe time if
785 * the unique name of the devices they drive is static.
787 * Return: 0 on success or a negative error code on failure.
789 int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
791 va_list ap;
793 kfree(dev->unique);
795 va_start(ap, fmt);
796 dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
797 va_end(ap);
799 return dev->unique ? 0 : -ENOMEM;
801 EXPORT_SYMBOL(drm_dev_set_unique);
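/*
 * Editor's note, a hedged usage sketch that is not in the original file: a
 * PCI driver would typically call drm_dev_set_unique() from its probe path
 * with a bus-id style string, for example the same format that
 * drm_add_busid_modesetting() builds below; domain/bus/slot/func stand in
 * for the device's PCI location.
 *
 *	ret = drm_dev_set_unique(dev, "pci:%04x:%02x:%02x.%d",
 *				 domain, bus, slot, func);
 */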
802 #endif
805 * DRM Core
806 * The DRM core module initializes all global DRM objects and makes them
807 * available to drivers. Once setup, drivers can probe their respective
808 * devices.
809 * Currently, core management includes:
810 * - The "DRM-Global" key/value database
811 * - Global ID management for connectors
812 * - DRM major number allocation
813 * - DRM minor management
814 * - DRM sysfs class
815 * - DRM debugfs root
817 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
818 * interface registered on a DRM device, you can request minor numbers from DRM
819 * core. DRM core takes care of major-number management and char-dev
820 * registration. A stub ->open() callback forwards any open() requests to the
821 * registered minor.
824 #if 0
825 static int drm_stub_open(struct inode *inode, struct file *filp)
827 const struct file_operations *new_fops;
828 struct drm_minor *minor;
829 int err;
831 DRM_DEBUG("\n");
833 mutex_lock(&drm_global_mutex);
834 minor = drm_minor_acquire(iminor(inode));
835 if (IS_ERR(minor)) {
836 err = PTR_ERR(minor);
837 goto out_unlock;
840 new_fops = fops_get(minor->dev->driver->fops);
841 if (!new_fops) {
842 err = -ENODEV;
843 goto out_release;
846 replace_fops(filp, new_fops);
847 if (filp->f_op->open)
848 err = filp->f_op->open(inode, filp);
849 else
850 err = 0;
852 out_release:
853 drm_minor_release(minor);
854 out_unlock:
855 mutex_unlock(&drm_global_mutex);
856 return err;
859 static const struct file_operations drm_stub_fops = {
860 .owner = THIS_MODULE,
861 .open = drm_stub_open,
862 .llseek = noop_llseek,
865 static int __init drm_core_init(void)
867 int ret = -ENOMEM;
869 drm_global_init();
870 drm_connector_ida_init();
871 idr_init(&drm_minors_idr);
873 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
874 goto err_p1;
876 ret = drm_sysfs_init();
877 if (ret < 0) {
878 printk(KERN_ERR "DRM: Error creating drm class.\n");
879 goto err_p2;
882 drm_debugfs_root = debugfs_create_dir("dri", NULL);
883 if (!drm_debugfs_root) {
884 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
885 ret = -1;
886 goto err_p3;
889 DRM_INFO("Initialized %s %d.%d.%d %s\n",
890 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
891 return 0;
892 err_p3:
893 drm_sysfs_destroy();
894 err_p2:
895 unregister_chrdev(DRM_MAJOR, "drm");
897 idr_destroy(&drm_minors_idr);
898 err_p1:
899 return ret;
902 static void __exit drm_core_exit(void)
904 debugfs_remove(drm_debugfs_root);
905 drm_sysfs_destroy();
907 unregister_chrdev(DRM_MAJOR, "drm");
909 drm_connector_ida_destroy();
910 idr_destroy(&drm_minors_idr);
913 module_init(drm_core_init);
914 module_exit(drm_core_exit);
915 #endif
917 #include <sys/devfs.h>
919 #include <linux/export.h>
920 #include <linux/dmi.h>
921 #include <drm/drmP.h>
922 #include <drm/drm_core.h>
924 static int drm_load(struct drm_device *dev);
925 drm_pci_id_list_t *drm_find_description(int vendor, int device,
926 drm_pci_id_list_t *idlist);
928 #define DRIVER_SOFTC(unit) \
929 ((struct drm_device *)devclass_get_softc(drm_devclass, unit))
931 static int
932 drm_modevent(module_t mod, int type, void *data)
935 switch (type) {
936 case MOD_LOAD:
937 TUNABLE_INT_FETCH("drm.debug", &drm_debug);
938 break;
940 return (0);
943 static moduledata_t drm_mod = {
944 "drm",
945 drm_modevent,
948 DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
949 MODULE_VERSION(drm, 1);
950 MODULE_DEPEND(drm, agp, 1, 1, 1);
951 MODULE_DEPEND(drm, pci, 1, 1, 1);
952 MODULE_DEPEND(drm, iicbus, 1, 1, 1);
954 static struct dev_ops drm_cdevsw = {
955 { "drm", 0, D_TRACKCLOSE | D_MPSAFE },
956 .d_open = drm_open,
957 .d_close = drm_close,
958 .d_read = drm_read,
959 .d_ioctl = drm_ioctl,
960 .d_kqfilter = drm_kqfilter,
961 .d_mmap = drm_mmap,
962 .d_mmap_single = drm_mmap_single,
965 SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
966 SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
967 "DRM debugging");
969 int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
971 drm_pci_id_list_t *id_entry;
972 int vendor, device;
974 vendor = pci_get_vendor(kdev);
975 device = pci_get_device(kdev);
977 if (pci_get_class(kdev) != PCIC_DISPLAY)
978 return ENXIO;
980 id_entry = drm_find_description(vendor, device, idlist);
981 if (id_entry != NULL) {
982 if (!device_get_desc(kdev)) {
983 device_set_desc(kdev, id_entry->name);
984 DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
986 return 0;
989 return ENXIO;
992 int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
994 struct drm_device *dev;
995 drm_pci_id_list_t *id_entry;
996 int unit, error;
997 u_int irq_flags;
998 int msi_enable;
1000 unit = device_get_unit(kdev);
1001 dev = device_get_softc(kdev);
1003 if (!strcmp(device_get_name(kdev), "drmsub"))
1004 dev->dev = device_get_parent(kdev);
1005 else
1006 dev->dev = kdev;
1008 dev->pci_domain = pci_get_domain(dev->dev);
1009 dev->pci_bus = pci_get_bus(dev->dev);
1010 dev->pci_slot = pci_get_slot(dev->dev);
1011 dev->pci_func = pci_get_function(dev->dev);
1012 drm_init_pdev(dev->dev, &dev->pdev);
1014 id_entry = drm_find_description(dev->pdev->vendor,
1015 dev->pdev->device, idlist);
1016 dev->id_entry = id_entry;
1018 if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
1019 msi_enable = 1;
1021 dev->irq_type = pci_alloc_1intr(dev->dev, msi_enable,
1022 &dev->irqrid, &irq_flags);
1024 dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
1025 &dev->irqrid, irq_flags);
1027 if (!dev->irqr) {
1028 return (ENOENT);
1031 dev->irq = (int) rman_get_start(dev->irqr);
1032 dev->pdev->irq = dev->irq; /* for i915 */
1035 /* Print the contents of pdev struct. */
1036 drm_print_pdev(dev->pdev);
1038 lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
1039 lwkt_serialize_init(&dev->irq_lock);
1040 lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
1041 lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);
1043 error = drm_load(dev);
1044 if (error)
1045 goto error;
1047 error = drm_create_cdevs(kdev);
1048 if (error)
1049 goto error;
1051 return (error);
1052 error:
1053 if (dev->irqr) {
1054 bus_release_resource(dev->dev, SYS_RES_IRQ,
1055 dev->irqrid, dev->irqr);
1057 if (dev->irq_type == PCI_INTR_TYPE_MSI) {
1058 pci_release_msi(dev->dev);
1060 return (error);
1064 drm_create_cdevs(device_t kdev)
1066 struct drm_device *dev;
1067 int error, unit;
1069 unit = device_get_unit(kdev);
1070 dev = device_get_softc(kdev);
1072 dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
1073 DRM_DEV_MODE, "dri/card%d", unit);
1074 error = 0;
1075 if (error == 0)
1076 dev->devnode->si_drv1 = dev;
1077 return (error);
1080 #ifndef DRM_DEV_NAME
1081 #define DRM_DEV_NAME "drm"
1082 #endif
1084 devclass_t drm_devclass;
1086 drm_pci_id_list_t *drm_find_description(int vendor, int device,
1087 drm_pci_id_list_t *idlist)
1089 int i = 0;
1091 for (i = 0; idlist[i].vendor != 0; i++) {
1092 if ((idlist[i].vendor == vendor) &&
1093 ((idlist[i].device == device) ||
1094 (idlist[i].device == 0))) {
1095 return &idlist[i];
1098 return NULL;
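/*
 * Editor's note (sketch, not part of the original file): the idlist walked
 * above is terminated by an entry with a zero vendor, and a device id of 0
 * matches any device from that vendor.  A driver's table might look like
 * this; the IDs and name are made up.
 *
 *	static drm_pci_id_list_t foo_pciidlist[] = {
 *		{ .vendor = 0x8086, .device = 0x0166, .name = "Example GPU" },
 *		{ .vendor = 0, .device = 0, .name = NULL }
 *	};
 */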
1101 static int drm_load(struct drm_device *dev)
1103 int i, retcode;
1105 DRM_DEBUG("\n");
1107 INIT_LIST_HEAD(&dev->maplist);
1109 drm_sysctl_init(dev);
1110 INIT_LIST_HEAD(&dev->filelist);
1112 dev->counters = 6;
1113 dev->types[0] = _DRM_STAT_LOCK;
1114 dev->types[1] = _DRM_STAT_OPENS;
1115 dev->types[2] = _DRM_STAT_CLOSES;
1116 dev->types[3] = _DRM_STAT_IOCTLS;
1117 dev->types[4] = _DRM_STAT_LOCKS;
1118 dev->types[5] = _DRM_STAT_UNLOCKS;
1120 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
1121 atomic_set(&dev->counts[i], 0);
1123 INIT_LIST_HEAD(&dev->vblank_event_list);
1125 if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
1126 if (drm_pci_device_is_agp(dev))
1127 dev->agp = drm_agp_init(dev);
1130 if (dev->driver->driver_features & DRIVER_GEM) {
1131 retcode = drm_gem_init(dev);
1132 if (retcode != 0) {
1133 DRM_ERROR("Cannot initialize graphics execution "
1134 "manager (GEM)\n");
1135 goto error1;
1139 if (dev->driver->load != NULL) {
1140 DRM_LOCK(dev);
1141 /* Shared code returns -errno. */
1142 retcode = -dev->driver->load(dev,
1143 dev->id_entry->driver_private);
1144 if (pci_enable_busmaster(dev->dev))
1145 DRM_ERROR("Request to enable bus-master failed.\n");
1146 DRM_UNLOCK(dev);
1147 if (retcode != 0)
1148 goto error1;
1151 DRM_INFO("Initialized %s %d.%d.%d %s\n",
1152 dev->driver->name,
1153 dev->driver->major,
1154 dev->driver->minor,
1155 dev->driver->patchlevel,
1156 dev->driver->date);
1158 return 0;
1160 error1:
1161 drm_gem_destroy(dev);
1162 drm_sysctl_cleanup(dev);
1163 DRM_LOCK(dev);
1164 drm_lastclose(dev);
1165 DRM_UNLOCK(dev);
1166 if (dev->devnode != NULL)
1167 destroy_dev(dev->devnode);
1169 lockuninit(&dev->vbl_lock);
1170 lockuninit(&dev->dev_lock);
1171 lockuninit(&dev->event_lock);
1172 lockuninit(&dev->struct_mutex);
1174 return retcode;
1178 * Stub is needed for devfs
1180 int drm_close(struct dev_close_args *ap)
1182 return 0;
1185 void drm_cdevpriv_dtor(void *cd)
1187 struct drm_file *file_priv = cd;
1188 struct drm_device *dev = file_priv->dev;
1189 int retcode = 0;
1191 DRM_DEBUG("open_count = %d\n", dev->open_count);
1193 DRM_LOCK(dev);
1195 if (dev->driver->preclose != NULL)
1196 dev->driver->preclose(dev, file_priv);
1198 /* ========================================================
1199 * Begin inline drm_release
1202 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
1203 DRM_CURRENTPID, (long)dev->dev, dev->open_count);
1205 if (dev->driver->driver_features & DRIVER_GEM)
1206 drm_gem_release(dev, file_priv);
1208 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
1209 !dev->driver->reclaim_buffers_locked)
1210 drm_legacy_reclaim_buffers(dev, file_priv);
1212 funsetown(&dev->buf_sigio);
1214 if (dev->driver->postclose != NULL)
1215 dev->driver->postclose(dev, file_priv);
1216 list_del(&file_priv->lhead);
1219 /* ========================================================
1220 * End inline drm_release
1223 atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
1224 device_unbusy(dev->dev);
1225 if (--dev->open_count == 0) {
1226 retcode = drm_lastclose(dev);
1229 DRM_UNLOCK(dev);
1233 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1234 struct sysctl_oid *top)
1236 struct sysctl_oid *oid;
1238 ksnprintf(dev->busid_str, sizeof(dev->busid_str),
1239 "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
1240 dev->pci_slot, dev->pci_func);
1241 oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
1242 CTLFLAG_RD, dev->busid_str, 0, NULL);
1243 if (oid == NULL)
1244 return (ENOMEM);
1245 dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
1246 oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
1247 "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
1248 if (oid == NULL)
1249 return (ENOMEM);
1251 return (0);
1255 drm_mmap_single(struct dev_mmap_single_args *ap)
1257 struct drm_device *dev;
1258 struct cdev *kdev = ap->a_head.a_dev;
1259 vm_ooffset_t *offset = ap->a_offset;
1260 vm_size_t size = ap->a_size;
1261 struct vm_object **obj_res = ap->a_object;
1262 int nprot = ap->a_nprot;
1264 dev = drm_get_device_from_kdev(kdev);
1265 if (dev->drm_ttm_bdev != NULL) {
1266 return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
1267 obj_res, nprot));
1268 } else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
1269 return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
1270 } else {
1271 return (ENODEV);
1275 /* XXX broken code */
1276 #if DRM_LINUX
1278 #include <sys/sysproto.h>
1280 MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
1282 #define LINUX_IOCTL_DRM_MIN 0x6400
1283 #define LINUX_IOCTL_DRM_MAX 0x64ff
1285 static linux_ioctl_function_t drm_linux_ioctl;
1286 static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
1287 LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
1289 /* The bits for in/out are switched on Linux */
1290 #define LINUX_IOC_IN IOC_OUT
1291 #define LINUX_IOC_OUT IOC_IN
1293 static int
1294 drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
1296 int error;
1297 int cmd = args->cmd;
1299 args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
1300 if (cmd & LINUX_IOC_IN)
1301 args->cmd |= IOC_IN;
1302 if (cmd & LINUX_IOC_OUT)
1303 args->cmd |= IOC_OUT;
1305 error = ioctl(p, (struct ioctl_args *)args);
1307 return error;
1309 #endif /* DRM_LINUX */
1311 static int
1312 drm_core_init(void *arg)
1315 drm_global_init();
1317 #if DRM_LINUX
1318 linux_ioctl_register_handler(&drm_handler);
1319 #endif /* DRM_LINUX */
1321 DRM_INFO("Initialized %s %d.%d.%d %s\n",
1322 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
1323 return 0;
1326 static void
1327 drm_core_exit(void *arg)
1330 #if DRM_LINUX
1331 linux_ioctl_unregister_handler(&drm_handler);
1332 #endif /* DRM_LINUX */
1334 drm_global_release();
1337 SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1338 drm_core_init, NULL);
1339 SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1340 drm_core_exit, NULL);
1343 #include <linux/dmi.h>
1346 * Check if dmi_system_id structure matches system DMI data
1348 static bool
1349 dmi_found(const struct dmi_system_id *dsi)
1351 int i, slot;
1352 bool found = false;
1353 char *sys_vendor, *board_vendor, *product_name, *board_name;
1355 sys_vendor = kgetenv("smbios.system.maker");
1356 board_vendor = kgetenv("smbios.planar.maker");
1357 product_name = kgetenv("smbios.system.product");
1358 board_name = kgetenv("smbios.planar.product");
1360 for (i = 0; i < NELEM(dsi->matches); i++) {
1361 slot = dsi->matches[i].slot;
1362 switch (slot) {
1363 case DMI_NONE:
1364 break;
1365 case DMI_SYS_VENDOR:
1366 if (sys_vendor != NULL &&
1367 !strcmp(sys_vendor, dsi->matches[i].substr))
1368 break;
1369 else
1370 goto done;
1371 case DMI_BOARD_VENDOR:
1372 if (board_vendor != NULL &&
1373 !strcmp(board_vendor, dsi->matches[i].substr))
1374 break;
1375 else
1376 goto done;
1377 case DMI_PRODUCT_NAME:
1378 if (product_name != NULL &&
1379 !strcmp(product_name, dsi->matches[i].substr))
1380 break;
1381 else
1382 goto done;
1383 case DMI_BOARD_NAME:
1384 if (board_name != NULL &&
1385 !strcmp(board_name, dsi->matches[i].substr))
1386 break;
1387 else
1388 goto done;
1389 default:
1390 goto done;
1393 found = true;
1395 done:
1396 if (sys_vendor != NULL)
1397 kfreeenv(sys_vendor);
1398 if (board_vendor != NULL)
1399 kfreeenv(board_vendor);
1400 if (product_name != NULL)
1401 kfreeenv(product_name);
1402 if (board_name != NULL)
1403 kfreeenv(board_name);
1405 return found;
1408 int dmi_check_system(const struct dmi_system_id *sysid)
1410 const struct dmi_system_id *dsi;
1411 int num = 0;
1413 for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
1414 if (dmi_found(dsi)) {
1415 num++;
1416 if (dsi->callback && dsi->callback(dsi))
1417 break;
1420 return (num);
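/*
 * Editor's note, an illustrative caller sketch that is not part of the
 * original file: dmi_check_system() walks a table terminated by an entry
 * whose first match slot is 0, counts matching entries and runs their
 * callbacks (a nonzero callback return stops the scan).  Field and macro
 * names follow the Linux <linux/dmi.h> conventions assumed above; the
 * vendor and product strings are made up.
 *
 *	static int foo_dmi_callback(const struct dmi_system_id *id)
 *	{
 *		DRM_INFO("applying example quirk for %s\n", id->ident);
 *		return 1;
 *	}
 *
 *	static const struct dmi_system_id foo_quirks[] = {
 *		{
 *			.callback = foo_dmi_callback,
 *			.ident = "Example Machine",
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Example 1000"),
 *			},
 *		},
 *		{ }
 *	};
 */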