/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/conf.h>
#include <bus/pci/pcireg.h>
#include <linux/types.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_legacy.h"

int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_local_map *map;
	struct drm_map_list *entry = NULL;
	drm_dma_handle_t *dmah;

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_WAITOK | M_NULLOK);
	if (!map) {
		return -ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		kfree(map);
		return -EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
		    (uintmax_t)offset, size);
		kfree(map);
		return -EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
		    (uintmax_t)offset, size);
		kfree(map);
		return -EINVAL;
	}

	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		list_for_each_entry(entry, &dev->maplist, head) {
			if (entry->map->type == type && (entry->map->offset == offset ||
			    (entry->map->type == _DRM_SHM &&
			    entry->map->flags == _DRM_CONTAINS_LOCK))) {
				entry->map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		map->handle = kmalloc(map->size, M_DRM, M_WAITOK | M_NULLOK);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK(dev);
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK(dev);
				kfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK(dev);
		}
		break;
	case _DRM_AGP:
		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			kfree(map);
			return -EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
		map->offset = dev->sg->vaddr + offset;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		kfree(map);
		return -EINVAL;
	}

	list_add(&entry->head, &dev->maplist);

done:
	/* Jumped to, with lock held, when a kernel map is found. */
	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
		  map->size);

	*map_ptr = map;

	return 0;
}
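
/*
 * Illustration (not part of the original file): a legacy driver would
 * typically register its MMIO range through the helper above during load,
 * roughly as in the hypothetical sketch below; "base", "size" and "regs"
 * are placeholders, not identifiers from this file.
 *
 *	struct drm_local_map *regs;
 *	int ret = drm_legacy_addmap(dev, base, size, _DRM_REGISTERS,
 *				    _DRM_READ_ONLY, &regs);
 *	if (ret != 0)
 *		return ret;
 */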

/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return -EACCES; /* Require read/write */

	if (!capable(CAP_SYS_ADMIN) && request->type != _DRM_AGP)
		return -EACCES;

	DRM_LOCK(dev);
	err = drm_legacy_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK(dev);
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = (void *)map->handle;

	return 0;
}
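
/*
 * For reference, user space reaches the handler above through the generic
 * DRM ioctl path.  A minimal, hypothetical call from a client would look
 * roughly like the sketch below; only the final offset, size and handle
 * written back into the request are meaningful to the caller.
 *
 *	struct drm_map req = { .offset = base, .size = size,
 *			       .type = _DRM_SHM, .flags = _DRM_CONTAINS_LOCK };
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &req);
 */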

/*
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's being used, and frees any associated resources (such
 * as MTRR's) if it is not in use.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			list_del(&r_list->head);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_legacy_ioremapfree(map, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		kfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}

int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;

	DRM_LOCK(dev);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty and
	 * we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		DRM_UNLOCK(dev);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		DRM_UNLOCK(dev);
		return 0;
	}

	drm_legacy_rmmap(dev, map);

	DRM_UNLOCK(dev);

	return 0;
}

/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}
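
/*
 * Note on the helper above: the addbufs paths below deliberately bump
 * entry->buf_count (and entry->seg_count in the PCI case) to the full
 * requested count before calling drm_cleanup_buf_error() on a partial
 * failure, so a single cleanup pass releases the DMA segments, every
 * dev_private allocation made so far, and the seglist/buflist arrays
 * themselves.
 */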

static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	/* struct drm_agp_mem *agp_entry; */
	/* int valid */
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
				 M_WAITOK | M_NULLOK | M_ZERO);
	if (!entry->buflist) {
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
					   M_WAITOK | M_NULLOK | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return -ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    M_DRM, M_WAITOK | M_NULLOK);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}
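
/*
 * Worked example for the sizing math used above (and repeated in the PCI
 * and SG variants), assuming the usual PAGE_SHIFT of 12, i.e. 4 KiB pages:
 *
 *	request->size = 16384  ->  order = order_base_2(16384) = 14
 *	size       = 1 << 14         = 16384 bytes per buffer
 *	page_order = 14 - 12         = 2
 *	total      = PAGE_SIZE << 2  = 16384 bytes per allocation chunk
 *
 * With _DRM_PAGE_ALIGN set, alignment becomes round_page(size), so each
 * buffer starts on a page boundary within the aperture.
 */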

static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
				 M_WAITOK | M_NULLOK | M_ZERO);
	entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
				 M_WAITOK | M_NULLOK | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist),
				M_DRM, M_WAITOK | M_NULLOK);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		kfree(temp_pagelist);
		kfree(entry->seglist);
		kfree(entry->buflist);
		return -ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		spin_unlock(&dev->dma_lock);
		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
		spin_lock(&dev->dma_lock);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kmalloc(buf->dev_priv_size,
						   M_DRM,
						   M_WAITOK | M_NULLOK |
						   M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    M_DRM, M_WAITOK | M_NULLOK);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	kfree(dma->pagelist);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}
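
/*
 * The PCI path above is the only one that has to grow dma->pagelist.
 * It builds the enlarged list in temp_pagelist and only swaps it in
 * (freeing the old array) after every DMA segment and dev_private
 * allocation has succeeded, so an -ENOMEM part way through leaves the
 * original pagelist untouched.
 */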

static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
				 M_WAITOK | M_NULLOK | M_ZERO);
	if (entry->buflist == NULL)
		return -ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
					   M_WAITOK | M_NULLOK | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
			  entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    M_DRM, M_WAITOK | M_NULLOK);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}
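
/*
 * The SG variant mirrors the AGP one; the differences are that agp_offset
 * is taken relative to the scatter/gather area (plain request->agp_start,
 * with no dev->agp->base added) and that buf->address points into
 * dev->sg->vaddr, the kernel mapping of that area, rather than into the
 * AGP aperture.
 */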

/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return -EINVAL;

	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return -EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return -ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

static int drm_legacy_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	int order, ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (request->count < 0 || request->count > 4096)
		return -EINVAL;

	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return -EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return -ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

int drm_legacy_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	int order, ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (request->count < 0 || request->count > 4096)
		return -EINVAL;

	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return -EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return -ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}
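
/*
 * All three wrappers above apply the same guards under dma_lock: no new
 * allocations once a buffer-using ioctl has bumped dev->buf_use, and at
 * most one allocation per size order.  A condensed sketch of the pattern,
 * with drm_do_addbufs_xxx standing in for any of the helpers:
 *
 *	spin_lock(&dev->dma_lock);
 *	if (dev->buf_use != 0 || dev->dma->bufs[order].buf_count != 0) {
 *		spin_unlock(&dev->dma_lock);
 *		return -EBUSY;		// -ENOMEM for the per-order case
 *	}
 *	ret = drm_do_addbufs_xxx(dev, request);
 *	spin_unlock(&dev->dma_lock);
 */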

/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call to either addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_legacy_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_legacy_addbufs_sg(dev, request);
	else
		err = drm_legacy_addbufs_pci(dev, request);

	return err;
}
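
/*
 * For illustration only: a hypothetical caller of DRM_IOCTL_ADD_BUFS
 * fills a struct drm_buf_desc and lets the dispatcher above select the
 * backing type from the flags, e.g.
 *
 *	struct drm_buf_desc req = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *
 * On return, req.count and req.size reflect what was actually allocated.
 */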

/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &from->low_mark,
						 sizeof(from->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &from->high_mark,
						 sizeof(from->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
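
/*
 * drm_legacy_infobufs() makes two passes over dma->bufs: the first only
 * counts the populated size orders; descriptors are copied out in the
 * second pass only when request->count is large enough to hold them.
 * Either way request->count is set to the number of populated orders on
 * return, so a caller with a too-small array learns how much space to
 * provide.
 */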

/*
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

/*
 * Unreserve the buffers in the list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	spin_lock(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx))) {
			retcode = -EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			retcode = -EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  DRM_CURRENTPID);
			retcode = -EINVAL;
			break;
		}
		drm_legacy_free_buffer(dev, buf);
	}
	spin_unlock(&dev->dma_lock);

	return retcode;
}
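
/*
 * The index list in drm_legacy_freebufs() is fetched one entry at a time
 * with copy_from_user(), and each index is validated (range check plus
 * ownership check against file_priv) before the buffer is released, so a
 * bad entry aborts the loop part way while the buffers already processed
 * stay freed.
 */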

/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	spin_lock(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = -EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = (unsigned long)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
	retcode = -vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (copy_to_user(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = -EFAULT;
			goto done;
		}
		if (copy_to_user(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = -EFAULT;
			goto done;
		}
		if (copy_to_user(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = -EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (copy_to_user(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = -EFAULT;
			goto done;
		}
	}

done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
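
/*
 * The per-buffer address handed back by drm_legacy_mapbufs() is simply the
 * start of the single vm_mmap() region plus that buffer's byte offset
 * within the DMA area (buf->offset); one mapping covers the whole pool and
 * the copied-out list only carries idx, total, a zeroed "used" field and
 * the computed address.
 */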

int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}
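
/*
 * drm_legacy_getsarea() below returns the shared-memory map holding the
 * hardware lock (the SAREA): the first _DRM_SHM entry on the maplist with
 * _DRM_CONTAINS_LOCK set, or NULL if no such map has been added.
 */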

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);