/* drivers/char/drm/drm_irq.c */
/**
 * \file drm_irq.c
 * IRQ support
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"

#include <linux/interrupt.h>	/* For task queue support */
40 /**
41 * Get interrupt from bus id.
43 * \param inode device inode.
44 * \param filp file pointer.
45 * \param cmd command.
46 * \param arg user argument, pointing to a drm_irq_busid structure.
47 * \return zero on success or a negative number on failure.
49 * Finds the PCI device with the specified bus id and gets its IRQ number.
50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
51 * to that of the device that this DRM instance attached to.
53 int drm_irq_by_busid(struct inode *inode, struct file *filp,
54 unsigned int cmd, unsigned long arg)
56 struct drm_file *priv = filp->private_data;
57 struct drm_device *dev = priv->head->dev;
58 struct drm_irq_busid __user *argp = (void __user *)arg;
59 struct drm_irq_busid p;
61 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
62 return -EINVAL;
64 if (copy_from_user(&p, argp, sizeof(p)))
65 return -EFAULT;
67 if ((p.busnum >> 8) != drm_get_pci_domain(dev) ||
68 (p.busnum & 0xff) != dev->pdev->bus->number ||
69 p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn))
70 return -EINVAL;
72 p.irq = dev->irq;
74 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq);
75 if (copy_to_user(argp, &p, sizeof(p)))
76 return -EFAULT;
77 return 0;
80 /**
81 * Install IRQ handler.
83 * \param dev DRM device.
84 * \param irq IRQ number.
86 * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
87 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
88 * before and after the installation.
90 static int drm_irq_install(struct drm_device * dev)
92 int ret;
93 unsigned long sh_flags = 0;
95 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
96 return -EINVAL;
98 if (dev->irq == 0)
99 return -EINVAL;
101 mutex_lock(&dev->struct_mutex);
103 /* Driver must have been initialized */
104 if (!dev->dev_private) {
105 mutex_unlock(&dev->struct_mutex);
106 return -EINVAL;
109 if (dev->irq_enabled) {
110 mutex_unlock(&dev->struct_mutex);
111 return -EBUSY;
113 dev->irq_enabled = 1;
114 mutex_unlock(&dev->struct_mutex);
116 DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
118 if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
119 init_waitqueue_head(&dev->vbl_queue);
121 spin_lock_init(&dev->vbl_lock);
123 INIT_LIST_HEAD(&dev->vbl_sigs);
124 INIT_LIST_HEAD(&dev->vbl_sigs2);
126 dev->vbl_pending = 0;
129 /* Before installing handler */
130 dev->driver->irq_preinstall(dev);
132 /* Install handler */
133 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
134 sh_flags = IRQF_SHARED;
136 ret = request_irq(dev->irq, dev->driver->irq_handler,
137 sh_flags, dev->devname, dev);
138 if (ret < 0) {
139 mutex_lock(&dev->struct_mutex);
140 dev->irq_enabled = 0;
141 mutex_unlock(&dev->struct_mutex);
142 return ret;
145 /* After installing handler */
146 dev->driver->irq_postinstall(dev);
148 return 0;
152 * Uninstall the IRQ handler.
154 * \param dev DRM device.
156 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
158 int drm_irq_uninstall(struct drm_device * dev)
160 int irq_enabled;
162 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
163 return -EINVAL;
165 mutex_lock(&dev->struct_mutex);
166 irq_enabled = dev->irq_enabled;
167 dev->irq_enabled = 0;
168 mutex_unlock(&dev->struct_mutex);
170 if (!irq_enabled)
171 return -EINVAL;
173 DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
175 dev->driver->irq_uninstall(dev);
177 free_irq(dev->irq, dev);
179 dev->locked_tasklet_func = NULL;
181 return 0;
184 EXPORT_SYMBOL(drm_irq_uninstall);
187 * IRQ control ioctl.
189 * \param inode device inode.
190 * \param filp file pointer.
191 * \param cmd command.
192 * \param arg user argument, pointing to a drm_control structure.
193 * \return zero on success or a negative number on failure.
195 * Calls irq_install() or irq_uninstall() according to \p arg.
197 int drm_control(struct inode *inode, struct file *filp,
198 unsigned int cmd, unsigned long arg)
200 struct drm_file *priv = filp->private_data;
201 struct drm_device *dev = priv->head->dev;
202 struct drm_control ctl;
204 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
206 if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl)))
207 return -EFAULT;
209 switch (ctl.func) {
210 case DRM_INST_HANDLER:
211 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
212 return 0;
213 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
214 ctl.irq != dev->irq)
215 return -EINVAL;
216 return drm_irq_install(dev);
217 case DRM_UNINST_HANDLER:
218 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
219 return 0;
220 return drm_irq_uninstall(dev);
221 default:
222 return -EINVAL;
227 * Wait for VBLANK.
229 * \param inode device inode.
230 * \param filp file pointer.
231 * \param cmd command.
232 * \param data user argument, pointing to a drm_wait_vblank structure.
233 * \return zero on success or a negative number on failure.
235 * Verifies the IRQ is installed.
237 * If a signal is requested checks if this task has already scheduled the same signal
238 * for the same vblank sequence number - nothing to be done in
239 * that case. If the number of tasks waiting for the interrupt exceeds 100 the
240 * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
241 * task.
243 * If a signal is not requested, then calls vblank_wait().
245 int drm_wait_vblank(DRM_IOCTL_ARGS)
247 struct drm_file *priv = filp->private_data;
248 struct drm_device *dev = priv->head->dev;
249 union drm_wait_vblank __user *argp = (void __user *)data;
250 union drm_wait_vblank vblwait;
251 struct timeval now;
252 int ret = 0;
253 unsigned int flags, seq;
255 if (!dev->irq)
256 return -EINVAL;
258 if (copy_from_user(&vblwait, argp, sizeof(vblwait)))
259 return -EFAULT;
261 if (vblwait.request.type &
262 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
263 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
264 vblwait.request.type,
265 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
266 return -EINVAL;
269 flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
271 if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
272 DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
273 return -EINVAL;
275 seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
276 : &dev->vbl_received);
278 switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) {
279 case _DRM_VBLANK_RELATIVE:
280 vblwait.request.sequence += seq;
281 vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
282 case _DRM_VBLANK_ABSOLUTE:
283 break;
284 default:
285 return -EINVAL;
288 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
289 (seq - vblwait.request.sequence) <= (1<<23)) {
290 vblwait.request.sequence = seq + 1;
293 if (flags & _DRM_VBLANK_SIGNAL) {
294 unsigned long irqflags;
295 struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
296 ? &dev->vbl_sigs2 : &dev->vbl_sigs;
297 struct drm_vbl_sig *vbl_sig;
299 spin_lock_irqsave(&dev->vbl_lock, irqflags);
301 /* Check if this task has already scheduled the same signal
302 * for the same vblank sequence number; nothing to be done in
303 * that case
305 list_for_each_entry(vbl_sig, vbl_sigs, head) {
306 if (vbl_sig->sequence == vblwait.request.sequence
307 && vbl_sig->info.si_signo == vblwait.request.signal
308 && vbl_sig->task == current) {
309 spin_unlock_irqrestore(&dev->vbl_lock,
310 irqflags);
311 vblwait.reply.sequence = seq;
312 goto done;
316 if (dev->vbl_pending >= 100) {
317 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
318 return -EBUSY;
321 dev->vbl_pending++;
323 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
325 if (!
326 (vbl_sig =
327 drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
328 return -ENOMEM;
331 memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
333 vbl_sig->sequence = vblwait.request.sequence;
334 vbl_sig->info.si_signo = vblwait.request.signal;
335 vbl_sig->task = current;
337 spin_lock_irqsave(&dev->vbl_lock, irqflags);
339 list_add_tail(&vbl_sig->head, vbl_sigs);
341 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
343 vblwait.reply.sequence = seq;
344 } else {
345 if (flags & _DRM_VBLANK_SECONDARY) {
346 if (dev->driver->vblank_wait2)
347 ret = dev->driver->vblank_wait2(dev, &vblwait.request.sequence);
348 } else if (dev->driver->vblank_wait)
349 ret =
350 dev->driver->vblank_wait(dev,
351 &vblwait.request.sequence);
353 do_gettimeofday(&now);
354 vblwait.reply.tval_sec = now.tv_sec;
355 vblwait.reply.tval_usec = now.tv_usec;
358 done:
359 if (copy_to_user(argp, &vblwait, sizeof(vblwait)))
360 return -EFAULT;
362 return ret;
366 * Send the VBLANK signals.
368 * \param dev DRM device.
370 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
372 * If a signal is not requested, then calls vblank_wait().
374 void drm_vbl_send_signals(struct drm_device * dev)
376 unsigned long flags;
377 int i;
379 spin_lock_irqsave(&dev->vbl_lock, flags);
381 for (i = 0; i < 2; i++) {
382 struct drm_vbl_sig *vbl_sig, *tmp;
383 struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
384 unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
385 &dev->vbl_received);
387 list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
388 if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
389 vbl_sig->info.si_code = vbl_seq;
390 send_sig_info(vbl_sig->info.si_signo,
391 &vbl_sig->info, vbl_sig->task);
393 list_del(&vbl_sig->head);
395 drm_free(vbl_sig, sizeof(*vbl_sig),
396 DRM_MEM_DRIVER);
398 dev->vbl_pending--;
403 spin_unlock_irqrestore(&dev->vbl_lock, flags);
406 EXPORT_SYMBOL(drm_vbl_send_signals);
409 * Tasklet wrapper function.
411 * \param data DRM device in disguise.
413 * Attempts to grab the HW lock and calls the driver callback on success. On
414 * failure, leave the lock marked as contended so the callback can be called
415 * from drm_unlock().
417 static void drm_locked_tasklet_func(unsigned long data)
419 struct drm_device *dev = (struct drm_device *)data;
420 unsigned long irqflags;
422 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
424 if (!dev->locked_tasklet_func ||
425 !drm_lock_take(&dev->lock,
426 DRM_KERNEL_CONTEXT)) {
427 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
428 return;
431 dev->lock.lock_time = jiffies;
432 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
434 dev->locked_tasklet_func(dev);
436 drm_lock_free(&dev->lock,
437 DRM_KERNEL_CONTEXT);
439 dev->locked_tasklet_func = NULL;
441 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
445 * Schedule a tasklet to call back a driver hook with the HW lock held.
447 * \param dev DRM device.
448 * \param func Driver callback.
450 * This is intended for triggering actions that require the HW lock from an
451 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
452 * completes. Note that the callback may be called from interrupt or process
453 * context, it must not make any assumptions about this. Also, the HW lock will
454 * be held with the kernel context or any client context.
456 void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
458 unsigned long irqflags;
459 static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
461 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
462 test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
463 return;
465 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
467 if (dev->locked_tasklet_func) {
468 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
469 return;
472 dev->locked_tasklet_func = func;
474 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
476 drm_tasklet.data = (unsigned long)dev;
478 tasklet_hi_schedule(&drm_tasklet);
480 EXPORT_SYMBOL(drm_locked_tasklet);