Import 2.3.18pre1
[davej-history.git] / drivers / char / drm / context.c
blobb85aed6065827dfcc6948383c44b34076445ec47
/* context.c -- IOCTLs for contexts and DMA queues -*- linux-c -*-
 * Created: Tue Feb  2 08:37:54 1999 by faith@precisioninsight.com
 * Revised: Fri Aug 20 11:32:09 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/context.c,v 1.5 1999/08/30 13:05:00 faith Exp $
 * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/gen_ioctl.c,v 1.2 1999/06/27 14:08:27 dawes Exp $
 */
32 #define __NO_VERSION__
33 #include "drmP.h"
35 static int drm_init_queue(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
37 DRM_DEBUG("\n");
39 if (atomic_read(&q->use_count) != 1
40 || atomic_read(&q->finalization)
41 || atomic_read(&q->block_count)) {
42 DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
43 atomic_read(&q->use_count),
44 atomic_read(&q->finalization),
45 atomic_read(&q->block_count));
48 atomic_set(&q->finalization, 0);
49 atomic_set(&q->block_count, 0);
50 atomic_set(&q->block_read, 0);
51 atomic_set(&q->block_write, 0);
52 atomic_set(&q->total_queued, 0);
53 atomic_set(&q->total_flushed, 0);
54 atomic_set(&q->total_locks, 0);
56 init_waitqueue_head(&q->write_queue);
57 init_waitqueue_head(&q->read_queue);
58 init_waitqueue_head(&q->flush_queue);
60 q->flags = ctx->flags;
62 drm_waitlist_create(&q->waitlist, dev->dma->buf_count);
64 return 0;
/* drm_alloc_queue:
 *
 * PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
 *	   disappear (so all deallocation must be done after IOCTLs are off)
 *	2) dev->queue_count < dev->queue_slots
 *	3) dev->queuelist[i].use_count == 0 and
 *	   dev->queuelist[i].finalization == 0 if i not in use
 *
 * POST: 1) dev->queuelist[i].use_count == 1
 *	 2) dev->queue_count < dev->queue_slots */
77 static int drm_alloc_queue(drm_device_t *dev)
79 int i;
80 drm_queue_t *queue;
81 int oldslots;
82 int newslots;
83 /* Check for a free queue */
84 for (i = 0; i < dev->queue_count; i++) {
85 atomic_inc(&dev->queuelist[i]->use_count);
86 if (atomic_read(&dev->queuelist[i]->use_count) == 1
87 && !atomic_read(&dev->queuelist[i]->finalization)) {
88 DRM_DEBUG("%d (free)\n", i);
89 return i;
91 atomic_dec(&dev->queuelist[i]->use_count);
93 /* Allocate a new queue */
94 down(&dev->struct_sem);
96 queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES);
97 memset(queue, 0, sizeof(*queue));
98 atomic_set(&queue->use_count, 1);
100 ++dev->queue_count;
101 if (dev->queue_count >= dev->queue_slots) {
102 oldslots = dev->queue_slots * sizeof(*dev->queuelist);
103 if (!dev->queue_slots) dev->queue_slots = 1;
104 dev->queue_slots *= 2;
105 newslots = dev->queue_slots * sizeof(*dev->queuelist);
107 dev->queuelist = drm_realloc(dev->queuelist,
108 oldslots,
109 newslots,
110 DRM_MEM_QUEUES);
111 if (!dev->queuelist) {
112 up(&dev->struct_sem);
113 DRM_DEBUG("out of memory\n");
114 return -ENOMEM;
117 dev->queuelist[dev->queue_count-1] = queue;
119 up(&dev->struct_sem);
120 DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
121 return dev->queue_count - 1;
124 int drm_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
125 unsigned long arg)
127 drm_ctx_res_t res;
128 drm_ctx_t ctx;
129 int i;
131 DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
132 copy_from_user_ret(&res, (drm_ctx_res_t *)arg, sizeof(res), -EFAULT);
133 if (res.count >= DRM_RESERVED_CONTEXTS) {
134 memset(&ctx, 0, sizeof(ctx));
135 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
136 ctx.handle = i;
137 copy_to_user_ret(&res.contexts[i],
139 sizeof(i),
140 -EFAULT);
143 res.count = DRM_RESERVED_CONTEXTS;
144 copy_to_user_ret((drm_ctx_res_t *)arg, &res, sizeof(res), -EFAULT);
145 return 0;
149 int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
150 unsigned long arg)
152 drm_file_t *priv = filp->private_data;
153 drm_device_t *dev = priv->dev;
154 drm_ctx_t ctx;
156 copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
157 if ((ctx.handle = drm_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
158 /* Init kernel's context and get a new one. */
159 drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
160 ctx.handle = drm_alloc_queue(dev);
162 drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
163 DRM_DEBUG("%d\n", ctx.handle);
164 copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
165 return 0;
168 int drm_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
169 unsigned long arg)
171 drm_file_t *priv = filp->private_data;
172 drm_device_t *dev = priv->dev;
173 drm_ctx_t ctx;
174 drm_queue_t *q;
176 copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
178 DRM_DEBUG("%d\n", ctx.handle);
180 if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
181 q = dev->queuelist[ctx.handle];
183 atomic_inc(&q->use_count);
184 if (atomic_read(&q->use_count) == 1) {
185 /* No longer in use */
186 atomic_dec(&q->use_count);
187 return -EINVAL;
190 if (DRM_BUFCOUNT(&q->waitlist)) {
191 atomic_dec(&q->use_count);
192 return -EBUSY;
195 q->flags = ctx.flags;
197 atomic_dec(&q->use_count);
198 return 0;
201 int drm_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
202 unsigned long arg)
204 drm_file_t *priv = filp->private_data;
205 drm_device_t *dev = priv->dev;
206 drm_ctx_t ctx;
207 drm_queue_t *q;
209 copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
211 DRM_DEBUG("%d\n", ctx.handle);
213 if (ctx.handle >= dev->queue_count) return -EINVAL;
214 q = dev->queuelist[ctx.handle];
216 atomic_inc(&q->use_count);
217 if (atomic_read(&q->use_count) == 1) {
218 /* No longer in use */
219 atomic_dec(&q->use_count);
220 return -EINVAL;
223 ctx.flags = q->flags;
224 atomic_dec(&q->use_count);
226 copy_to_user_ret((drm_ctx_t *)arg, &ctx, sizeof(ctx), -EFAULT);
228 return 0;
231 int drm_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
232 unsigned long arg)
234 drm_file_t *priv = filp->private_data;
235 drm_device_t *dev = priv->dev;
236 drm_ctx_t ctx;
238 copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
239 DRM_DEBUG("%d\n", ctx.handle);
240 return drm_context_switch(dev, dev->last_context, ctx.handle);
243 int drm_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
244 unsigned long arg)
246 drm_file_t *priv = filp->private_data;
247 drm_device_t *dev = priv->dev;
248 drm_ctx_t ctx;
250 copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
251 DRM_DEBUG("%d\n", ctx.handle);
252 drm_context_switch_complete(dev, ctx.handle);
254 return 0;
257 int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
258 unsigned long arg)
260 drm_file_t *priv = filp->private_data;
261 drm_device_t *dev = priv->dev;
262 drm_ctx_t ctx;
263 drm_queue_t *q;
264 drm_buf_t *buf;
266 copy_from_user_ret(&ctx, (drm_ctx_t *)arg, sizeof(ctx), -EFAULT);
267 DRM_DEBUG("%d\n", ctx.handle);
269 if (ctx.handle >= dev->queue_count) return -EINVAL;
270 q = dev->queuelist[ctx.handle];
272 atomic_inc(&q->use_count);
273 if (atomic_read(&q->use_count) == 1) {
274 /* No longer in use */
275 atomic_dec(&q->use_count);
276 return -EINVAL;
279 atomic_inc(&q->finalization); /* Mark queue in finalization state */
280 atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
281 finalization) */
283 while (test_and_set_bit(0, &dev->interrupt_flag)) {
284 schedule();
285 if (signal_pending(current)) {
286 clear_bit(0, &dev->interrupt_flag);
287 return -EINTR;
290 /* Remove queued buffers */
291 while ((buf = drm_waitlist_get(&q->waitlist))) {
292 drm_free_buffer(dev, buf);
294 clear_bit(0, &dev->interrupt_flag);
296 /* Wakeup blocked processes */
297 wake_up_interruptible(&q->read_queue);
298 wake_up_interruptible(&q->write_queue);
299 wake_up_interruptible(&q->flush_queue);
301 /* Finalization over. Queue is made
302 available when both use_count and
303 finalization become 0, which won't
304 happen until all the waiting processes
305 stop waiting. */
306 atomic_dec(&q->finalization);
307 return 0;