/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/conf.h>
#include <bus/pci/pcireg.h>
#include <linux/types.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_legacy.h"

int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_local_map *map;
	struct drm_map_list *entry = NULL;
	drm_dma_handle_t *dmah;

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_WAITOK | M_NULLOK);
	if (!map) {
		return -ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		kfree(map);
		return -EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
			  (uintmax_t)offset, size);
		kfree(map);
		return -EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
			  (uintmax_t)offset, size);
		kfree(map);
		return -EINVAL;
	}

	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		list_for_each_entry(entry, &dev->maplist, head) {
			if (entry->map->type == type && (entry->map->offset == offset ||
			    (entry->map->type == _DRM_SHM &&
			     entry->map->flags == _DRM_CONTAINS_LOCK))) {
				entry->map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		map->handle = kmalloc(map->size, M_DRM, M_WAITOK | M_NULLOK);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK(dev);
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK(dev);
				kfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK(dev);
		}
		break;
	case _DRM_AGP:
		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			kfree(map);
			return -EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
		map->offset = dev->sg->vaddr + offset;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		kfree(map);
		return -EINVAL;
	}

	list_add(&entry->head, &dev->maplist);

done:
	/* Jumped to, with lock held, when a kernel map is found. */
	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
		  map->size);

	*map_ptr = map;

	return 0;
}
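
/*
 * Illustrative sketch only (not part of the original file; helper and
 * variable names are hypothetical): a legacy UMS driver would typically call
 * drm_legacy_addmap() from its load path to publish its register BAR and the
 * SAREA before userspace starts mapping them, e.g.
 *
 *	struct drm_local_map *regs, *sarea;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, reg_base, reg_size,
 *				_DRM_REGISTERS, 0, &regs);
 *	if (ret == 0)
 *		ret = drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
 *					_DRM_CONTAINS_LOCK, &sarea);
 *
 * where reg_base/reg_size stand for the physical base and page-aligned
 * length of the register aperture obtained from the PCI BAR.
 */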

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return -EACCES; /* Require read/write */

	if (!capable(CAP_SYS_ADMIN) && request->type != _DRM_AGP)
		return -EACCES;

	DRM_LOCK(dev);
	err = drm_legacy_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK(dev);
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = (void *)map->handle;

	return 0;
}
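
/*
 * Illustrative sketch of the userspace side (hypothetical snippet, not part
 * of the original file): the X server fills a struct drm_map and issues
 * DRM_IOCTL_ADD_MAP; on success the kernel writes the user token back into
 * the handle field, which is later passed to mmap().
 *
 *	struct drm_map req = {
 *		.offset = reg_base,		// physical base of the BAR
 *		.size   = reg_size,		// page-aligned length
 *		.type   = _DRM_REGISTERS,
 *		.flags  = 0,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &req) == 0)
 *		token = (unsigned long)req.handle;
 */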

/**
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = r_list->map->mtrr;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
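
/*
 * Illustrative sketch (hypothetical, not part of the original file): on
 * input DRM_IOCTL_GET_MAP treats drm_map::offset as a list index, so
 * userspace can enumerate all maps by counting up until the ioctl fails:
 *
 *	struct drm_map m;
 *	int idx;
 *
 *	for (idx = 0; ; idx++) {
 *		memset(&m, 0, sizeof(m));
 *		m.offset = idx;
 *		if (ioctl(fd, DRM_IOCTL_GET_MAP, &m) != 0)
 *			break;
 *		// m.handle is the user token; m.type/m.size describe the map
 *	}
 */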

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's being used, and frees any associated resources (such
 * as MTRRs) if it isn't.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			list_del(&r_list->head);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_legacy_ioremapfree(map, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		kfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}

int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	ret = drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;

	DRM_LOCK(dev);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty and
	 * we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		DRM_UNLOCK(dev);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		DRM_UNLOCK(dev);
		return 0;
	}

	drm_legacy_rmmap(dev, map);

	DRM_UNLOCK(dev);

	return 0;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	/* struct drm_agp_mem *agp_entry; */
	/* int valid */
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
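
	/*
	 * Worked example of the size/order arithmetic above (assuming the
	 * usual 4 KiB pages, i.e. PAGE_SHIFT == 12): request->size == 4000
	 * gives order = order_base_2(4000) = 12, size = 1 << 12 = 4096,
	 * page_order = 0 and total = 4096; request->size == 65536 gives
	 * order = 16, size = 65536, page_order = 4 and
	 * total = PAGE_SIZE << 4 = 65536.  Buffer sizes are therefore always
	 * rounded up to a power of two, and "total" is that size expressed
	 * in whole pages.  The same computation is repeated in
	 * drm_do_addbufs_pci() and drm_do_addbufs_sg() below.
	 */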

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	/* Make sure buffers are located in AGP memory that we own */

	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
				 M_WAITOK | M_NULLOK | M_ZERO);
	if (!entry->buflist) {
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
					   M_WAITOK | M_NULLOK | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return -ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    M_DRM, M_WAITOK | M_NULLOK);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
				 M_WAITOK | M_NULLOK | M_ZERO);
	entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
				 M_WAITOK | M_NULLOK | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist),
				M_DRM, M_WAITOK | M_NULLOK);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		kfree(temp_pagelist);
		kfree(entry->seglist);
		kfree(entry->buflist);
		return -ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		spin_unlock(&dev->dma_lock);
		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
		spin_lock(&dev->dma_lock);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			return -ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kmalloc(buf->dev_priv_size,
						   M_DRM,
						   M_WAITOK | M_NULLOK |
						   M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    M_DRM, M_WAITOK | M_NULLOK);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	kfree(dma->pagelist);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
				 M_WAITOK | M_NULLOK | M_ZERO);
	if (entry->buflist == NULL)
		return -ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
					   M_WAITOK | M_NULLOK | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
			  entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    M_DRM, M_WAITOK | M_NULLOK);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return -EINVAL;

	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return -EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return -ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

static int drm_legacy_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	int order, ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (request->count < 0 || request->count > 4096)
		return -EINVAL;

	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return -EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return -ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

int drm_legacy_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	int order, ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (request->count < 0 || request->count > 4096)
		return -EINVAL;

	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return -EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return -ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_legacy_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_legacy_addbufs_sg(dev, request);
	else
		err = drm_legacy_addbufs_pci(dev, request);

	return err;
}
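
/*
 * Illustrative sketch of the calling side (hypothetical, not part of the
 * original file): a legacy client asks for a pool of equally sized DMA
 * buffers with DRM_IOCTL_ADD_BUFS; the kernel may round the size up and
 * allocate fewer buffers, and writes the values it actually used back into
 * the request.
 *
 *	struct drm_buf_desc desc = {
 *		.count = 64,
 *		.size  = 65536,
 *		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = dma_offset,	// offset of the buffers in the aperture
 *	};
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *		num_bufs = desc.count;		// what was really allocated
 */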

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &from->low_mark,
						 sizeof(from->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &from->high_mark,
						 sizeof(from->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
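
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * usual calling pattern for DRM_IOCTL_INFO_BUFS is two passes, first with
 * count == 0 to learn how many per-order entries exist, then with a list
 * large enough to receive them:
 *
 *	struct drm_buf_info info = { .count = 0, .list = NULL };
 *
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);		// info.count now set
 *	info.list = calloc(info.count, sizeof(*info.list));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);		// entries copied out
 */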

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	spin_lock(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx))) {
			retcode = -EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			retcode = -EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  DRM_CURRENTPID);
			retcode = -EINVAL;
			break;
		}
		drm_legacy_free_buffer(dev, buf);
	}
	spin_unlock(&dev->dma_lock);

	return retcode;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	spin_lock(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = -EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = (unsigned long)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
	retcode = -vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (copy_to_user(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = -EFAULT;
			goto done;
		}
		if (copy_to_user(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = -EFAULT;
			goto done;
		}
		if (copy_to_user(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = -EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (copy_to_user(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = -EFAULT;
			goto done;
		}
	}
done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
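
/*
 * Illustrative sketch of the userspace side (hypothetical, not part of the
 * original file): the client maps every DMA buffer in a single call and
 * then indexes the returned list by buffer index.
 *
 *	struct drm_buf_map bm;
 *
 *	bm.count = num_bufs;			// must cover every allocated buffer
 *	bm.list  = calloc(num_bufs, sizeof(*bm.list));
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0) {
 *		// bm.virtual is the base of the whole mapping and
 *		// bm.list[i].address points at buffer i inside it
 *	}
 */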

int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);
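
/*
 * Illustrative sketch (hypothetical, not part of the original file): legacy
 * drivers typically look the SAREA up once and cache it in their private
 * state, e.g.
 *
 *	dev_priv->sarea = drm_legacy_getsarea(dev);
 *	if (dev_priv->sarea == NULL)
 *		return -EINVAL;
 */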