1 /* context.c -- IOCTLs for contexts and DMA queues -*- linux-c -*-
2 * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
3 * Revised: Fri Aug 20 11:32:09 1999 by faith@precisioninsight.com
5 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
 16 * paragraph) shall be included in all copies or substantial portions of the
 17 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/context.c,v 1.5 1999/08/30 13:05:00 faith Exp $
28 * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/gen_ioctl.c,v 1.2 1999/06/27 14:08:27 dawes Exp $
32 #define __NO_VERSION__
35 static int drm_init_queue(drm_device_t
*dev
, drm_queue_t
*q
, drm_ctx_t
*ctx
)
39 if (atomic_read(&q
->use_count
) != 1
40 || atomic_read(&q
->finalization
)
41 || atomic_read(&q
->block_count
)) {
42 DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
43 atomic_read(&q
->use_count
),
44 atomic_read(&q
->finalization
),
45 atomic_read(&q
->block_count
));
48 atomic_set(&q
->finalization
, 0);
49 atomic_set(&q
->block_count
, 0);
50 atomic_set(&q
->block_read
, 0);
51 atomic_set(&q
->block_write
, 0);
52 atomic_set(&q
->total_queued
, 0);
53 atomic_set(&q
->total_flushed
, 0);
54 atomic_set(&q
->total_locks
, 0);
56 init_waitqueue_head(&q
->write_queue
);
57 init_waitqueue_head(&q
->read_queue
);
58 init_waitqueue_head(&q
->flush_queue
);
60 q
->flags
= ctx
->flags
;
62 drm_waitlist_create(&q
->waitlist
, dev
->dma
->buf_count
);
69 PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
70 disappear (so all deallocation must be done after IOCTLs are off)
71 2) dev->queue_count < dev->queue_slots
72 3) dev->queuelist[i].use_count == 0 and
73 dev->queuelist[i].finalization == 0 if i not in use
74 POST: 1) dev->queuelist[i].use_count == 1
75 2) dev->queue_count < dev->queue_slots */
77 static int drm_alloc_queue(drm_device_t
*dev
)
83 /* Check for a free queue */
84 for (i
= 0; i
< dev
->queue_count
; i
++) {
85 atomic_inc(&dev
->queuelist
[i
]->use_count
);
86 if (atomic_read(&dev
->queuelist
[i
]->use_count
) == 1
87 && !atomic_read(&dev
->queuelist
[i
]->finalization
)) {
88 DRM_DEBUG("%d (free)\n", i
);
91 atomic_dec(&dev
->queuelist
[i
]->use_count
);
93 /* Allocate a new queue */
94 down(&dev
->struct_sem
);
96 queue
= drm_alloc(sizeof(*queue
), DRM_MEM_QUEUES
);
97 memset(queue
, 0, sizeof(*queue
));
98 atomic_set(&queue
->use_count
, 1);
101 if (dev
->queue_count
>= dev
->queue_slots
) {
102 oldslots
= dev
->queue_slots
* sizeof(*dev
->queuelist
);
103 if (!dev
->queue_slots
) dev
->queue_slots
= 1;
104 dev
->queue_slots
*= 2;
105 newslots
= dev
->queue_slots
* sizeof(*dev
->queuelist
);
107 dev
->queuelist
= drm_realloc(dev
->queuelist
,
111 if (!dev
->queuelist
) {
112 up(&dev
->struct_sem
);
113 DRM_DEBUG("out of memory\n");
117 dev
->queuelist
[dev
->queue_count
-1] = queue
;
119 up(&dev
->struct_sem
);
120 DRM_DEBUG("%d (new)\n", dev
->queue_count
- 1);
121 return dev
->queue_count
- 1;
124 int drm_resctx(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
131 DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS
);
132 copy_from_user_ret(&res
, (drm_ctx_res_t
*)arg
, sizeof(res
), -EFAULT
);
133 if (res
.count
>= DRM_RESERVED_CONTEXTS
) {
134 memset(&ctx
, 0, sizeof(ctx
));
135 for (i
= 0; i
< DRM_RESERVED_CONTEXTS
; i
++) {
137 copy_to_user_ret(&res
.contexts
[i
],
143 res
.count
= DRM_RESERVED_CONTEXTS
;
144 copy_to_user_ret((drm_ctx_res_t
*)arg
, &res
, sizeof(res
), -EFAULT
);
149 int drm_addctx(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
152 drm_file_t
*priv
= filp
->private_data
;
153 drm_device_t
*dev
= priv
->dev
;
156 copy_from_user_ret(&ctx
, (drm_ctx_t
*)arg
, sizeof(ctx
), -EFAULT
);
157 if ((ctx
.handle
= drm_alloc_queue(dev
)) == DRM_KERNEL_CONTEXT
) {
158 /* Init kernel's context and get a new one. */
159 drm_init_queue(dev
, dev
->queuelist
[ctx
.handle
], &ctx
);
160 ctx
.handle
= drm_alloc_queue(dev
);
162 drm_init_queue(dev
, dev
->queuelist
[ctx
.handle
], &ctx
);
163 DRM_DEBUG("%d\n", ctx
.handle
);
164 copy_to_user_ret((drm_ctx_t
*)arg
, &ctx
, sizeof(ctx
), -EFAULT
);
168 int drm_modctx(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
171 drm_file_t
*priv
= filp
->private_data
;
172 drm_device_t
*dev
= priv
->dev
;
176 copy_from_user_ret(&ctx
, (drm_ctx_t
*)arg
, sizeof(ctx
), -EFAULT
);
178 DRM_DEBUG("%d\n", ctx
.handle
);
180 if (ctx
.handle
< 0 || ctx
.handle
>= dev
->queue_count
) return -EINVAL
;
181 q
= dev
->queuelist
[ctx
.handle
];
183 atomic_inc(&q
->use_count
);
184 if (atomic_read(&q
->use_count
) == 1) {
185 /* No longer in use */
186 atomic_dec(&q
->use_count
);
190 if (DRM_BUFCOUNT(&q
->waitlist
)) {
191 atomic_dec(&q
->use_count
);
195 q
->flags
= ctx
.flags
;
197 atomic_dec(&q
->use_count
);
201 int drm_getctx(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
204 drm_file_t
*priv
= filp
->private_data
;
205 drm_device_t
*dev
= priv
->dev
;
209 copy_from_user_ret(&ctx
, (drm_ctx_t
*)arg
, sizeof(ctx
), -EFAULT
);
211 DRM_DEBUG("%d\n", ctx
.handle
);
213 if (ctx
.handle
>= dev
->queue_count
) return -EINVAL
;
214 q
= dev
->queuelist
[ctx
.handle
];
216 atomic_inc(&q
->use_count
);
217 if (atomic_read(&q
->use_count
) == 1) {
218 /* No longer in use */
219 atomic_dec(&q
->use_count
);
223 ctx
.flags
= q
->flags
;
224 atomic_dec(&q
->use_count
);
226 copy_to_user_ret((drm_ctx_t
*)arg
, &ctx
, sizeof(ctx
), -EFAULT
);
231 int drm_switchctx(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
234 drm_file_t
*priv
= filp
->private_data
;
235 drm_device_t
*dev
= priv
->dev
;
238 copy_from_user_ret(&ctx
, (drm_ctx_t
*)arg
, sizeof(ctx
), -EFAULT
);
239 DRM_DEBUG("%d\n", ctx
.handle
);
240 return drm_context_switch(dev
, dev
->last_context
, ctx
.handle
);
243 int drm_newctx(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
246 drm_file_t
*priv
= filp
->private_data
;
247 drm_device_t
*dev
= priv
->dev
;
250 copy_from_user_ret(&ctx
, (drm_ctx_t
*)arg
, sizeof(ctx
), -EFAULT
);
251 DRM_DEBUG("%d\n", ctx
.handle
);
252 drm_context_switch_complete(dev
, ctx
.handle
);
257 int drm_rmctx(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
260 drm_file_t
*priv
= filp
->private_data
;
261 drm_device_t
*dev
= priv
->dev
;
266 copy_from_user_ret(&ctx
, (drm_ctx_t
*)arg
, sizeof(ctx
), -EFAULT
);
267 DRM_DEBUG("%d\n", ctx
.handle
);
269 if (ctx
.handle
>= dev
->queue_count
) return -EINVAL
;
270 q
= dev
->queuelist
[ctx
.handle
];
272 atomic_inc(&q
->use_count
);
273 if (atomic_read(&q
->use_count
) == 1) {
274 /* No longer in use */
275 atomic_dec(&q
->use_count
);
279 atomic_inc(&q
->finalization
); /* Mark queue in finalization state */
280 atomic_sub(2, &q
->use_count
); /* Mark queue as unused (pending
283 while (test_and_set_bit(0, &dev
->interrupt_flag
)) {
285 if (signal_pending(current
)) {
286 clear_bit(0, &dev
->interrupt_flag
);
290 /* Remove queued buffers */
291 while ((buf
= drm_waitlist_get(&q
->waitlist
))) {
292 drm_free_buffer(dev
, buf
);
294 clear_bit(0, &dev
->interrupt_flag
);
296 /* Wakeup blocked processes */
297 wake_up_interruptible(&q
->read_queue
);
298 wake_up_interruptible(&q
->write_queue
);
299 wake_up_interruptible(&q
->flush_queue
);
301 /* Finalization over. Queue is made
302 available when both use_count and
303 finalization become 0, which won't
304 happen until all the waiting processes
306 atomic_dec(&q
->finalization
);