drm: Sync drm_read() with Linux 4.7.10
sys/dev/drm/drm_fops.c (dragonfly.git)
/*
 * \file drm_fops.c
 * File operations for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drm_legacy.h"
#include "drm_internal.h"

#include <sys/devfs.h>

/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
/**
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
 * Drivers which implement private ioctls that require 32/64 bit compatibility
 * support must provide their own .compat_ioctl() handler that processes
 * private ioctls and calls drm_compat_ioctl() for core ioctls.
 *
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used to send vblank and
 * page flip completion events by the KMS API. But drivers can also use them
 * for their own needs, e.g. to signal completion of rendering.
 *
 * The memory mapping implementation will vary depending on how the driver
 * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
 * function, modern drivers should use one of the provided memory-manager
 * specific implementations. For GEM-based drivers this is drm_gem_mmap().
 *
 * No other file operations are supported by the DRM userspace API. Overall the
 * following is an example #file_operations structure:
 *
 *     static const struct file_operations example_drm_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *     #ifdef CONFIG_COMPAT
 *             .compat_ioctl = drm_compat_ioctl,
 *     #endif
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .llseek = no_llseek,
 *             .mmap = drm_gem_mmap,
 *     };
 */
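/*
 * On DragonFly the equivalent entry points are wired up through a struct
 * dev_ops table rather than a Linux file_operations structure. A minimal
 * sketch of what such a table might look like; the exact flag set and
 * member list are driver-specific, and "example_drm_ops" is a hypothetical
 * name used only for illustration:
 *
 *     static struct dev_ops example_drm_ops = {
 *             { "drm", 0, D_TRACKCLOSE | D_MPSAFE },
 *             .d_open =     drm_open,
 *             .d_read =     drm_read,
 *             .d_kqfilter = drm_kqfilter,
 *     };
 */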
extern drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);
extern devclass_t drm_devclass;

static int drm_setup(struct drm_device * dev)
{
	int ret;

	if (dev->driver->firstopen &&
	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = dev->driver->firstopen(dev);
		if (ret != 0)
			return ret;
	}

	dev->buf_use = 0;

	ret = drm_legacy_dma_setup(dev);
	if (ret < 0)
		return ret;

	init_waitqueue_head(&dev->lock.lock_queue);
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev->irq_enabled = 0;
	dev->context_flag = 0;
	dev->last_context = 0;
	dev->if_version = 0;

	dev->buf_sigio = NULL;

	DRM_DEBUG("\n");
	return 0;
}

#define DRIVER_SOFTC(unit) \
	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their .open() #file_operations
 * method. It looks up the correct DRM device and instantiates all the per-file
 * resources for it.
 *
 * RETURNS:
 *
 * 0 on success or negative errno value on failure.
 */
int drm_open(struct dev_open_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	int flags = ap->a_oflags;
	int fmt = 0;
	struct thread *p = curthread;
	struct drm_device *dev;
	int retcode;

	dev = DRIVER_SOFTC(minor(kdev));
	if (dev == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	retcode = drm_open_helper(kdev, flags, fmt, p, dev, ap->a_fp);

	if (retcode == 0) {
		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
		DRM_LOCK(dev);
		device_busy(dev->dev->bsddev);
		if (!dev->open_count++)
			retcode = drm_setup(dev);
		DRM_UNLOCK(dev);
	}

	DRM_DEBUG("return %d\n", retcode);

	return (retcode);
}
EXPORT_SYMBOL(drm_open);
/*
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */

/*
 * drm_new_set_master - Allocate a new master object and become master for the
 * associated master realm.
 *
 * @dev: The associated device.
 * @fpriv: File private identifying the client.
 *
 * This function must be called with dev::struct_mutex held.
 * Returns negative error code on failure. Zero on success.
 */

/*
 * Called whenever a process opens /dev/drm.
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and adds it into the double linked list in \p dev.
 */
int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
		    struct drm_device *dev, struct file *filp)
{
	struct drm_file *priv;
	int retcode;

	if (flags & O_EXCL)
		return EBUSY; /* No exclusive opens */
	dev->flags = flags;

	DRM_DEBUG("pid = %d, device = %s\n", DRM_CURRENTPID, devtoname(kdev));

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	filp->private_data = priv;
	priv->filp = filp;
	priv->uid = p->td_proc->p_ucred->cr_svuid;
	priv->pid = p->td_proc->p_pid;
	priv->dev = dev;

	/* for compatibility root is always authenticated */
	priv->authenticated = capable(CAP_SYS_ADMIN);
	priv->lock_count = 0;

	INIT_LIST_HEAD(&priv->lhead);
	INIT_LIST_HEAD(&priv->fbs);
	lockinit(&priv->fbs_lock, "dpfl", 0, LK_CANRECURSE);
	INIT_LIST_HEAD(&priv->blobs);
	INIT_LIST_HEAD(&priv->pending_event_list);
	INIT_LIST_HEAD(&priv->event_list);
	init_waitqueue_head(&priv->event_wait);
	priv->event_space = 4096; /* set aside 4k for event buffer */

	lockinit(&priv->event_read_lock, "dperl", 0, LK_CANRECURSE);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, priv);

	if (dev->driver->open) {
		/* shared code returns -errno */
		retcode = -dev->driver->open(dev, priv);
		if (retcode != 0) {
			kfree(priv);
			return retcode;
		}
	}

	/* first opener automatically becomes master */
	mutex_lock(&dev->master_mutex);
	priv->master = list_empty(&dev->filelist);
	mutex_unlock(&dev->master_mutex);

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

	kdev->si_drv1 = dev;
	retcode = devfs_set_cdevpriv(filp, priv, &drm_cdevpriv_dtor);
	if (retcode != 0)
		drm_cdevpriv_dtor(priv);

	return retcode;
}
/*
 * drm_legacy_dev_reinit
 *
 * Reinitializes a legacy/ums drm device in its lastclose function.
 */
static void drm_legacy_dev_reinit(struct drm_device *dev)
{
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);

	drm_legacy_agp_clear(dev);

	drm_legacy_sg_cleanup(dev);
#if 0
	drm_legacy_vma_flush(dev);
#endif
	drm_legacy_dma_takedown(dev);

	mutex_unlock(&dev->struct_mutex);

	dev->sigdata.lock = NULL;

	dev->context_flag = 0;
	dev->last_context = 0;
	dev->if_version = 0;

	DRM_DEBUG("lastclose completed\n");
}
/*
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device
 */
void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);

	if (dev->unique) {
		kfree(dev->unique);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	drm_legacy_agp_clear(dev);

	drm_legacy_sg_cleanup(dev);
	drm_legacy_dma_takedown(dev);

	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		wakeup(&dev->lock.lock_queue);
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("lastclose completed\n");

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		drm_legacy_dev_reinit(dev);
}
/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their .release() #file_operations
 * method. It frees any resources associated with the open file, and if this is
 * the last open file for the DRM device also proceeds to call drm_lastclose().
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
//int drm_release(struct inode *inode, struct file *filp)
int drm_release(device_t kdev)
{
	// XXX: filp is needed in this function
#if 0
	struct drm_file *file_priv = filp->private_data;
#endif
	struct drm_device *dev = device_get_softc(kdev);
	int i;

	mutex_lock(&drm_global_mutex);

#if 0
	if (dev->magicfree.next) {
		list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
			list_del(&pt->head);
			drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
			kfree(pt);
		}
		drm_ht_remove(&dev->magiclist);
	}
#endif

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("\n");

	drm_sysctl_cleanup(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_vblank_cleanup(dev);

	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);

	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
	 * worried about resource consumption while the DRM is inactive (between
	 * lastclose and firstopen or unload) because these aren't actually
	 * taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->dev->bsddev, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
		dev->pcir[i] = NULL;
	}

	if (dev->agp) {
		kfree(dev->agp);
		dev->agp = NULL;
	}

	if (dev->driver->unload != NULL) {
		DRM_LOCK(dev);
		dev->driver->unload(dev);
		DRM_UNLOCK(dev);
	}

	if (pci_disable_busmaster(dev->dev->bsddev))
		DRM_ERROR("Request to disable bus-master failed.\n");

	lockuninit(&dev->vbl_lock);
	lockuninit(&dev->dev_lock);
	lockuninit(&dev->event_lock);
	lockuninit(&dev->struct_mutex);

	/* ========================================================
	 * End inline drm_release
	 */

	mutex_unlock(&drm_global_mutex);

	return (0);
}
EXPORT_SYMBOL(drm_release);
/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their .read() #file_operations
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
 * must set the .llseek() #file_operation to no_llseek(). Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress. Since
 * the maximum event space is currently 4K it's recommended to just use that for
 * safety.
 *
 * RETURNS:
 *
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
/*
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
*/
int drm_read(struct dev_read_args *ap)
{
	struct file *filp = ap->a_fp;
	struct cdev *kdev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	size_t count = uio->uio_resid;
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	int ret = 0;	/* drm_read() returns int in DragonFly */

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			if (ret) {
				ret = 0; /* DragonFly expects a zero return value on success */
				break;
			}

			if (filp->f_flag & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
					!list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				break;
			}

			if (uiomove((caddr_t)e->event, length, uio)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			e->destroy(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
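/*
 * A userspace sketch of consuming events via this read method, assuming an
 * already-open DRM file descriptor drm_fd and a hypothetical handler
 * process_event(). The 4096-byte buffer matches the per-file event space
 * reserved in drm_open_helper(), so any pending event is guaranteed to fit:
 *
 *     char buf[4096];
 *     ssize_t len = read(drm_fd, buf, sizeof(buf));
 *     ssize_t off = 0;
 *     while (off < len) {
 *             struct drm_event *ev = (struct drm_event *)&buf[off];
 *             process_event(ev);      // e.g. switch on ev->type
 *             off += ev->length;      // events are packed back to back
 *     }
 */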
/**
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait: poll waiter table
 *
 * This function must be used by drivers as their .poll() #file_operations
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * See also drm_read().
 *
 * RETURNS:
 *
 * Mask of POLL flags indicating the current status of the file.
 */
static int
drmfilt(struct knote *kn, long hint)
{
	struct drm_file *file_priv = (struct drm_file *)kn->kn_hook;
	int ready = 0;

//	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		ready = 1;

	return (ready);
}

static void
drmfilt_detach(struct knote *kn)
{
	struct drm_file *file_priv;
	struct drm_device *dev;
	struct klist *klist;

	file_priv = (struct drm_file *)kn->kn_hook;
	dev = file_priv->dev;

	klist = &file_priv->dkq.ki_note;
	knote_remove(klist, kn);
}

static struct filterops drmfiltops =
	{ FILTEROP_MPSAFE | FILTEROP_ISFD, NULL, drmfilt_detach, drmfilt };

int
drm_kqfilter(struct dev_kqfilter_args *ap)
{
	struct file *filp = ap->a_fp;
	struct drm_file *file_priv = filp->private_data;
	struct knote *kn = ap->a_kn;
	struct klist *klist;

	ap->a_result = 0;

	switch (kn->kn_filter) {
	case EVFILT_READ:
	case EVFILT_WRITE:
		kn->kn_fop = &drmfiltops;
		kn->kn_hook = (caddr_t)file_priv;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		return (0);
	}

	klist = &file_priv->dkq.ki_note;
	knote_insert(klist, kn);

	return (0);
}
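/*
 * On DragonFly userspace can wait for DRM events with kqueue instead of
 * poll(2). A minimal sketch registering an already-open DRM file descriptor
 * drm_fd for read readiness (error handling omitted):
 *
 *     struct kevent kev, out;
 *     int kq = kqueue();
 *     EV_SET(&kev, drm_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *     kevent(kq, &kev, 1, NULL, 0, NULL);     // register the filter
 *     kevent(kq, NULL, 0, &out, 1, NULL);     // block until an event is queued
 */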
#ifdef __DragonFly__
/*
 * The Linux layer version of kfree() is a macro and can't be called
 * directly via a function pointer
 */
static void
drm_event_destroy(struct drm_pending_event *e)
{
	kfree(e);
}
#endif
/**
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed-in event for eventual delivery. If the
 * event doesn't get delivered (because the IOCTL fails later on, before queuing
 * up anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embed @p into a larger structure, it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * This is the locked version of drm_event_reserve_init() for callers which
 * already hold dev->event_lock.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
{
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	file_priv->event_space -= e->length;

	p->event = e;
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	/* we *could* pass this in as arg, but everyone uses kfree: */
#ifdef __DragonFly__
	p->destroy = drm_event_destroy;
#else
	p->destroy = (void (*) (struct drm_pending_event *)) kfree;
#endif

	return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);
/**
 * drm_event_reserve_init - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed-in event for eventual delivery. If the
 * event doesn't get delivered (because the IOCTL fails later on, before queuing
 * up anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embed @p into a larger structure, it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * Callers which already hold dev->event_lock should use
 * drm_event_reserve_init_locked() instead.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init(struct drm_device *dev,
			   struct drm_file *file_priv,
			   struct drm_pending_event *p,
			   struct drm_event *e)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return ret;
}
EXPORT_SYMBOL(drm_event_reserve_init);
/**
 * drm_event_cancel_free - free a DRM event and release its space
 * @dev: DRM device
 * @p: tracking structure for the pending event
 *
 * This function frees the event @p initialized with drm_event_reserve_init()
 * and releases any allocated space.
 */
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->event_lock, flags);
	if (p->file_priv) {
		p->file_priv->event_space += p->event->length;
		list_del(&p->pending_link);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
	p->destroy(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * dev->event_lock, see drm_send_event() for the unlocked version.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	if (!e->file_priv) {
		e->destroy(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
#ifdef __DragonFly__
	KNOTE(&e->file_priv->dkq.ki_note, 0);
#endif
}
EXPORT_SYMBOL(drm_send_event_locked);
/**
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. This function acquires dev->event_lock,
 * see drm_send_event_locked() for callers which already hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	drm_send_event_locked(dev, e);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
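/*
 * Taken together, a driver-side event normally flows through the helpers
 * above as reserve -> arm -> send, with drm_event_cancel_free() on the error
 * path. A hedged sketch, where struct example_event and queue_hardware_work()
 * are hypothetical and the tracking structure is the first member as
 * documented above:
 *
 *     struct example_event {
 *             struct drm_pending_event base;  // must be the first member
 *             struct drm_event_vblank event;
 *     };
 *
 *     struct example_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
 *     ev->event.base.type = DRM_EVENT_FLIP_COMPLETE;
 *     ev->event.base.length = sizeof(ev->event);
 *     ret = drm_event_reserve_init(dev, file_priv, &ev->base,
 *                                  &ev->event.base);
 *     if (ret) {
 *             kfree(ev);      // nothing was queued, plain free suffices
 *             return ret;
 *     }
 *     if (queue_hardware_work(dev, ev))
 *             drm_event_cancel_free(dev, &ev->base);
 *     // later, from the completion interrupt:
 *     drm_send_event(dev, &ev->base);
 */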