1 /* i810_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
2 * Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
32 #define __NO_VERSION__
37 int i810_addbufs_agp(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
40 drm_file_t
*priv
= filp
->private_data
;
41 drm_device_t
*dev
= priv
->dev
;
42 drm_device_dma_t
*dma
= dev
->dma
;
43 drm_buf_desc_t request
;
44 drm_buf_entry_t
*entry
;
47 unsigned long agp_offset
;
57 if (!dma
) return -EINVAL
;
59 copy_from_user_ret(&request
,
60 (drm_buf_desc_t
*)arg
,
64 count
= request
.count
;
65 order
= drm_order(request
.size
);
67 agp_offset
= request
.agp_start
;
68 alignment
= (request
.flags
& _DRM_PAGE_ALIGN
) ? PAGE_ALIGN(size
) :size
;
69 page_order
= order
- PAGE_SHIFT
> 0 ? order
- PAGE_SHIFT
: 0;
70 total
= PAGE_SIZE
<< page_order
;
73 if (order
< DRM_MIN_ORDER
|| order
> DRM_MAX_ORDER
) return -EINVAL
;
74 if (dev
->queue_count
) return -EBUSY
; /* Not while in use */
75 spin_lock(&dev
->count_lock
);
77 spin_unlock(&dev
->count_lock
);
80 atomic_inc(&dev
->buf_alloc
);
81 spin_unlock(&dev
->count_lock
);
83 down(&dev
->struct_sem
);
84 entry
= &dma
->bufs
[order
];
85 if (entry
->buf_count
) {
87 atomic_dec(&dev
->buf_alloc
);
88 return -ENOMEM
; /* May only call once for each order */
91 entry
->buflist
= drm_alloc(count
* sizeof(*entry
->buflist
),
93 if (!entry
->buflist
) {
95 atomic_dec(&dev
->buf_alloc
);
98 memset(entry
->buflist
, 0, count
* sizeof(*entry
->buflist
));
100 entry
->buf_size
= size
;
101 entry
->page_order
= page_order
;
104 while(entry
->buf_count
< count
) {
105 buf
= &entry
->buflist
[entry
->buf_count
];
106 buf
->idx
= dma
->buf_count
+ entry
->buf_count
;
107 buf
->total
= alignment
;
110 buf
->offset
= offset
;
111 buf
->bus_address
= dev
->agp
->base
+ agp_offset
+ offset
;
112 buf
->address
= (void *)(agp_offset
+ offset
+ dev
->agp
->base
);
116 init_waitqueue_head(&buf
->dma_wait
);
119 buf
->dev_private
= drm_alloc(sizeof(drm_i810_buf_priv_t
),
121 buf
->dev_priv_size
= sizeof(drm_i810_buf_priv_t
);
122 memset(buf
->dev_private
, 0, sizeof(drm_i810_buf_priv_t
));
124 #if DRM_DMA_HISTOGRAM
125 buf
->time_queued
= 0;
126 buf
->time_dispatched
= 0;
127 buf
->time_completed
= 0;
130 offset
= offset
+ alignment
;
132 byte_count
+= PAGE_SIZE
<< page_order
;
134 DRM_DEBUG("buffer %d @ %p\n",
135 entry
->buf_count
, buf
->address
);
138 dma
->buflist
= drm_realloc(dma
->buflist
,
139 dma
->buf_count
* sizeof(*dma
->buflist
),
140 (dma
->buf_count
+ entry
->buf_count
)
141 * sizeof(*dma
->buflist
),
143 for (i
= dma
->buf_count
; i
< dma
->buf_count
+ entry
->buf_count
; i
++)
144 dma
->buflist
[i
] = &entry
->buflist
[i
- dma
->buf_count
];
146 dma
->buf_count
+= entry
->buf_count
;
147 dma
->byte_count
+= byte_count
;
148 drm_freelist_create(&entry
->freelist
, entry
->buf_count
);
149 for (i
= 0; i
< entry
->buf_count
; i
++) {
150 drm_freelist_put(dev
, &entry
->freelist
, &entry
->buflist
[i
]);
153 up(&dev
->struct_sem
);
155 request
.count
= entry
->buf_count
;
158 copy_to_user_ret((drm_buf_desc_t
*)arg
,
163 atomic_dec(&dev
->buf_alloc
);
164 dma
->flags
= _DRM_DMA_USE_AGP
;
168 int i810_addbufs(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
171 drm_buf_desc_t request
;
173 copy_from_user_ret(&request
,
174 (drm_buf_desc_t
*)arg
,
178 if(request
.flags
& _DRM_AGP_BUFFER
)
179 return i810_addbufs_agp(inode
, filp
, cmd
, arg
);
184 int i810_infobufs(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
187 drm_file_t
*priv
= filp
->private_data
;
188 drm_device_t
*dev
= priv
->dev
;
189 drm_device_dma_t
*dma
= dev
->dma
;
190 drm_buf_info_t request
;
194 if (!dma
) return -EINVAL
;
196 spin_lock(&dev
->count_lock
);
197 if (atomic_read(&dev
->buf_alloc
)) {
198 spin_unlock(&dev
->count_lock
);
201 ++dev
->buf_use
; /* Can't allocate more after this call */
202 spin_unlock(&dev
->count_lock
);
204 copy_from_user_ret(&request
,
205 (drm_buf_info_t
*)arg
,
209 for (i
= 0, count
= 0; i
< DRM_MAX_ORDER
+1; i
++) {
210 if (dma
->bufs
[i
].buf_count
) ++count
;
213 DRM_DEBUG("count = %d\n", count
);
215 if (request
.count
>= count
) {
216 for (i
= 0, count
= 0; i
< DRM_MAX_ORDER
+1; i
++) {
217 if (dma
->bufs
[i
].buf_count
) {
218 copy_to_user_ret(&request
.list
[count
].count
,
219 &dma
->bufs
[i
].buf_count
,
223 copy_to_user_ret(&request
.list
[count
].size
,
224 &dma
->bufs
[i
].buf_size
,
225 sizeof(dma
->bufs
[0].buf_size
),
227 copy_to_user_ret(&request
.list
[count
].low_mark
,
233 copy_to_user_ret(&request
.list
[count
]
238 .freelist
.high_mark
),
240 DRM_DEBUG("%d %d %d %d %d\n",
242 dma
->bufs
[i
].buf_count
,
243 dma
->bufs
[i
].buf_size
,
244 dma
->bufs
[i
].freelist
.low_mark
,
245 dma
->bufs
[i
].freelist
.high_mark
);
250 request
.count
= count
;
252 copy_to_user_ret((drm_buf_info_t
*)arg
,
260 int i810_markbufs(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
263 drm_file_t
*priv
= filp
->private_data
;
264 drm_device_t
*dev
= priv
->dev
;
265 drm_device_dma_t
*dma
= dev
->dma
;
266 drm_buf_desc_t request
;
268 drm_buf_entry_t
*entry
;
270 if (!dma
) return -EINVAL
;
272 copy_from_user_ret(&request
,
273 (drm_buf_desc_t
*)arg
,
277 DRM_DEBUG("%d, %d, %d\n",
278 request
.size
, request
.low_mark
, request
.high_mark
);
279 order
= drm_order(request
.size
);
280 if (order
< DRM_MIN_ORDER
|| order
> DRM_MAX_ORDER
) return -EINVAL
;
281 entry
= &dma
->bufs
[order
];
283 if (request
.low_mark
< 0 || request
.low_mark
> entry
->buf_count
)
285 if (request
.high_mark
< 0 || request
.high_mark
> entry
->buf_count
)
288 entry
->freelist
.low_mark
= request
.low_mark
;
289 entry
->freelist
.high_mark
= request
.high_mark
;
294 int i810_freebufs(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
297 drm_file_t
*priv
= filp
->private_data
;
298 drm_device_t
*dev
= priv
->dev
;
299 drm_device_dma_t
*dma
= dev
->dma
;
300 drm_buf_free_t request
;
305 if (!dma
) return -EINVAL
;
307 copy_from_user_ret(&request
,
308 (drm_buf_free_t
*)arg
,
312 DRM_DEBUG("%d\n", request
.count
);
313 for (i
= 0; i
< request
.count
; i
++) {
314 copy_from_user_ret(&idx
,
318 if (idx
< 0 || idx
>= dma
->buf_count
) {
319 DRM_ERROR("Index %d (of %d max)\n",
320 idx
, dma
->buf_count
- 1);
323 buf
= dma
->buflist
[idx
];
324 if (buf
->pid
!= current
->pid
) {
325 DRM_ERROR("Process %d freeing buffer owned by %d\n",
326 current
->pid
, buf
->pid
);
329 drm_free_buffer(dev
, buf
);