/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: src/sys/dev/drm2/drm_bufs.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

#include <sys/conf.h>
#include <bus/pci/pcireg.h>
#include <linux/types.h>
#include <linux/export.h>
#include <drm/drmP.h>

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
        struct resource *res;
        int rid;

        DRM_LOCK_ASSERT(dev);

        if (resource >= DRM_MAX_PCI_RESOURCE) {
                DRM_ERROR("Resource %d too large\n", resource);
                return 1;
        }

        if (dev->pcir[resource] != NULL) {
                return 0;
        }

        DRM_UNLOCK(dev);
        rid = PCIR_BAR(resource);
        res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
            RF_SHAREABLE);
        DRM_LOCK(dev);
        if (res == NULL) {
                DRM_ERROR("Couldn't find resource 0x%x\n", resource);
                return 1;
        }

        if (dev->pcir[resource] == NULL) {
                dev->pcirid[resource] = rid;
                dev->pcir[resource] = res;
        }

        return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
                                     unsigned int resource)
{
        if (drm_alloc_resource(dev, resource) != 0)
                return 0;

        return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
                                   unsigned int resource)
{
        if (drm_alloc_resource(dev, resource) != 0)
                return 0;

        return rman_get_size(dev->pcir[resource]);
}

int drm_addmap(struct drm_device * dev, resource_size_t offset,
               unsigned int size, enum drm_map_type type,
               enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
        struct drm_local_map *map;
        struct drm_map_list *entry = NULL;
        drm_dma_handle_t *dmah;

        /* Allocate a new map structure, fill it in, and do any type-specific
         * initialization necessary.
         */
        map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_WAITOK | M_NULLOK);
        if (!map) {
                return ENOMEM;
        }

        map->offset = offset;
        map->size = size;
        map->type = type;
        map->flags = flags;

        /* Only allow shared memory to be removable since we only keep enough
         * book keeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
                DRM_ERROR("Requested removable map for non-DRM_SHM\n");
                drm_free(map, M_DRM);
                return EINVAL;
        }
        if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
                DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
                    (uintmax_t)offset, size);
                drm_free(map, M_DRM);
                return EINVAL;
        }
        if (offset + size < offset) {
                DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
                    (uintmax_t)offset, size);
                drm_free(map, M_DRM);
                return EINVAL;
        }

        DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
                  (unsigned long long)map->offset, map->size, map->type);

        /* Check if this is just another version of a kernel-allocated map, and
         * just hand that back if so.
         */
        if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
            type == _DRM_SHM) {
                list_for_each_entry(entry, &dev->maplist, head) {
                        if (entry->map->type == type && (entry->map->offset == offset ||
                            (entry->map->type == _DRM_SHM &&
                            entry->map->flags == _DRM_CONTAINS_LOCK))) {
                                entry->map->size = size;
                                DRM_DEBUG("Found kernel map %d\n", type);
                                goto done;
                        }
                }
        }

        switch (map->type) {
        case _DRM_REGISTERS:
                map->handle = drm_ioremap(dev, map);
                if (!(map->flags & _DRM_WRITE_COMBINING))
                        break;
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
                        map->mtrr = 1;
                break;
        case _DRM_SHM:
                map->handle = kmalloc(map->size, M_DRM, M_WAITOK | M_NULLOK);
                DRM_DEBUG("%lu %d %p\n",
                          map->size, order_base_2(map->size), map->handle);
                if (!map->handle) {
                        drm_free(map, M_DRM);
                        return ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        DRM_LOCK(dev);
                        if (dev->lock.hw_lock != NULL) {
                                DRM_UNLOCK(dev);
                                drm_free(map->handle, M_DRM);
                                drm_free(map, M_DRM);
                                return EBUSY;
                        }
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                        DRM_UNLOCK(dev);
                }
                break;
        case _DRM_AGP:
                /*valid = 0;*/
                /* In some cases (i810 driver), user space may have already
                 * added the AGP base itself, because dev->agp->base previously
                 * only got set during AGP enable.  So, only add the base
                 * address if the map's offset isn't already within the
                 * aperture.
                 */
                if (map->offset < dev->agp->base ||
                    map->offset > dev->agp->base +
                    dev->agp->agp_info.ai_aperture_size - 1) {
                        map->offset += dev->agp->base;
                }
                map->mtrr = dev->agp->agp_mtrr; /* for getmap */
                /*for (entry = dev->agp->memory; entry; entry = entry->next) {
                        if ((map->offset >= entry->bound) &&
                            (map->offset + map->size <=
                            entry->bound + entry->pages * PAGE_SIZE)) {
                                valid = 1;
                                break;
                        }
                }
                if (!valid) {
                        drm_free(map, M_DRM);
                        return EACCES;
                }*/
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, M_DRM);
                        return EINVAL;
                }
                map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
                map->offset = dev->sg->vaddr + offset;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
                 * As we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64bit variable first. */
                dmah = drm_pci_alloc(dev, map->size, map->size);
                if (!dmah) {
                        kfree(map);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = dmah->busaddr;
                break;
        default:
                DRM_ERROR("Bad map type %d\n", map->type);
                drm_free(map, M_DRM);
                return EINVAL;
        }

        list_add(&entry->head, &dev->maplist);

done:
        /* Jumped to, with lock held, when a kernel map is found. */
        DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
            map->size);

        *map_ptr = map;

        return 0;
}

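/*
 * Illustrative sketch (not part of the original source): a legacy driver's
 * attach path could locate its register BAR with the helpers above and hand
 * the range to drm_addmap() under the DRM lock, mirroring what the addmap
 * ioctl below does.  The BAR index 0, the _DRM_READ_ONLY flag and the error
 * handling are assumptions made only for this example.
 *
 *      struct drm_local_map *regs;
 *      unsigned long base = drm_get_resource_start(dev, 0);
 *      unsigned long len  = drm_get_resource_len(dev, 0);
 *      int ret;
 *
 *      DRM_LOCK(dev);
 *      ret = drm_addmap(dev, base, len, _DRM_REGISTERS,
 *          _DRM_READ_ONLY, &regs);
 *      DRM_UNLOCK(dev);
 *      if (ret != 0)
 *              return ret;
 */
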
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_map *request = data;
        drm_local_map_t *map;
        int err;

        if (!(dev->flags & (FREAD|FWRITE)))
                return EACCES; /* Require read/write */

        if (!capable(CAP_SYS_ADMIN) && request->type != _DRM_AGP)
                return EACCES;

        DRM_LOCK(dev);
        err = drm_addmap(dev, request->offset, request->size, request->type,
            request->flags, &map);
        DRM_UNLOCK(dev);
        if (err != 0)
                return err;

        request->offset = map->offset;
        request->size = map->size;
        request->type = map->type;
        request->flags = map->flags;
        request->mtrr = map->mtrr;
        request->handle = (void *)map->handle;

        return 0;
}

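/*
 * Userspace view (sketch, assuming an already-open DRM device fd and a
 * libdrm-style include path; reg_base/reg_size are placeholders): the
 * DRM_IOCTL_ADD_MAP request mirrors the handler above, and the kernel fills
 * in handle and mtrr on return.
 *
 *      #include <sys/ioctl.h>
 *      #include <drm/drm.h>
 *
 *      struct drm_map map = {
 *              .offset = reg_base,
 *              .size   = reg_size,
 *              .type   = _DRM_REGISTERS,
 *              .flags  = _DRM_READ_ONLY,
 *      };
 *      if (ioctl(drm_fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *              printf("map token %p, mtrr %d\n", map.handle, map.mtrr);
 */
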
void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
        struct drm_map_list *r_list = NULL, *list_t;
        drm_dma_handle_t dmah;
        int found = 0;

        DRM_LOCK_ASSERT(dev);

        if (map == NULL)
                return;

        /* Find the list entry for the map and remove it */
        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
                if (r_list->map == map) {
                        list_del(&r_list->head);
                        drm_free(r_list, M_DRM);
                        found = 1;
                        break;
                }
        }

        if (!found)
                return;

        switch (map->type) {
        case _DRM_REGISTERS:
                drm_ioremapfree(map);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (map->mtrr) {
                        int __unused retcode;

                        retcode = drm_mtrr_del(0, map->offset, map->size,
                            DRM_MTRR_WC);
                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                }
                break;
        case _DRM_SHM:
                drm_free(map->handle, M_DRM);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                dmah.vaddr = map->handle;
                dmah.busaddr = map->offset;
                drm_pci_free(dev, &dmah);
                break;
        default:
                DRM_ERROR("Bad map type %d\n", map->type);
                break;
        }

        drm_free(map, M_DRM);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_map *request = data;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;

        DRM_LOCK(dev);
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map &&
                    r_list->user_token == (unsigned long)request->handle &&
                    r_list->map->flags & _DRM_REMOVABLE) {
                        map = r_list->map;
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list_empty(&dev->maplist) || !map) {
                DRM_UNLOCK(dev);
                return -EINVAL;
        }

        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
                DRM_UNLOCK(dev);
                return 0;
        }

        drm_rmmap(dev, map);

        DRM_UNLOCK(dev);

        return 0;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
                                  struct drm_buf_entry * entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        drm_pci_free(dev, entry->seglist[i]);
                }
                drm_free(entry->seglist, M_DRM);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        drm_free(entry->buflist[i].dev_private, M_DRM);
                }
                drm_free(entry->buflist, M_DRM);

                entry->buf_count = 0;
        }
}

static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        /*drm_agp_mem_t *agp_entry;
        int valid*/
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = order_base_2(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);

        /* Make sure buffers are located in AGP memory that we own */

        /* Breaks MGA due to drm_alloc_agp not setting up entries for the
         * memory.  Safe to ignore for now because these ioctls are still
         * root-only.
         */
        /*valid = 0;
        for (agp_entry = dev->agp->memory; agp_entry;
            agp_entry = agp_entry->next) {
                if ((agp_offset >= agp_entry->bound) &&
                    (agp_offset + total * count <=
                    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
                        valid = 1;
                        break;
                }
        }
        if (!valid) {
                DRM_DEBUG("zone invalid\n");
                return EINVAL;
        }*/

        entry = &dma->bufs[order];

        entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
            M_WAITOK | M_NULLOK | M_ZERO);
        if (!entry->buflist) {
                return ENOMEM;
        }

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
                    M_WAITOK | M_NULLOK | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return ENOMEM;
                }

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = krealloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
            M_DRM, M_WAITOK | M_NULLOK);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        return 0;
}

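/*
 * Worked example of the sizing math above (illustrative only, assuming
 * PAGE_SIZE = 4096 and PAGE_SHIFT = 12): a request with size = 65536 gives
 * order = order_base_2(65536) = 16, size = 1 << 16 = 65536,
 * page_order = 16 - 12 = 4 and total = PAGE_SIZE << 4 = 65536, i.e. each
 * buffer occupies a 16-page chunk of the aperture.  With _DRM_PAGE_ALIGN
 * set, alignment = round_page(65536) = 65536, so consecutive buffers are
 * laid out back to back at agp_offset, agp_offset + 65536, and so on.
 */
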
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        drm_dma_handle_t *dmah;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = order_base_2(request->size);
        size = 1 << order;

        DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
            request->count, request->size, size, order);

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        entry = &dma->bufs[order];

        entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
            M_WAITOK | M_NULLOK | M_ZERO);
        entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
            M_WAITOK | M_NULLOK | M_ZERO);

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
            sizeof(*dma->pagelist),
            M_DRM, M_WAITOK | M_NULLOK);

        if (entry->buflist == NULL || entry->seglist == NULL ||
            temp_pagelist == NULL) {
                drm_free(temp_pagelist, M_DRM);
                drm_free(entry->seglist, M_DRM);
                drm_free(entry->buflist, M_DRM);
                return ENOMEM;
        }

        memcpy(temp_pagelist, dma->pagelist, dma->page_count *
            sizeof(*dma->pagelist));

        DRM_DEBUG("pagelist: %d entries\n",
            dma->page_count + (count << page_order));

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while (entry->buf_count < count) {
                spin_unlock(&dev->dma_lock);
                dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
                spin_lock(&dev->dma_lock);

                if (!dmah) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free(temp_pagelist, M_DRM);
                        return -ENOMEM;
                }

                entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                            dma->page_count + page_count,
                            (unsigned long)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++]
                            = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                    offset + size <= total && entry->buf_count < count;
                    offset += alignment, ++entry->buf_count) {
                        buf = &entry->buflist[entry->buf_count];
                        buf->idx = dma->buf_count + entry->buf_count;
                        buf->total = alignment;
                        buf->order = order;
                        buf->used = 0;
                        buf->offset = (dma->byte_count + byte_count + offset);
                        buf->address = ((char *)dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next = NULL;
                        buf->pending = 0;
                        buf->file_priv = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = kmalloc(buf->dev_priv_size,
                            M_DRM,
                            M_WAITOK | M_NULLOK |
                            M_ZERO);
                        if (buf->dev_private == NULL) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                drm_free(temp_pagelist, M_DRM);
                                return ENOMEM;
                        }

                        DRM_DEBUG("buffer %d @ %p\n",
                            entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = krealloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
            M_DRM, M_WAITOK | M_NULLOK);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                drm_free(temp_pagelist, M_DRM);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        drm_free(dma->pagelist, M_DRM);
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        request->count = entry->buf_count;
        request->size = size;

        return 0;
}

static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = order_base_2(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %ld\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);

        entry = &dma->bufs[order];

        entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
            M_WAITOK | M_NULLOK | M_ZERO);
        if (entry->buflist == NULL)
                return ENOMEM;

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
                buf->next = NULL;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
                    M_WAITOK | M_NULLOK | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return ENOMEM;
                }

                DRM_DEBUG("buffer %d @ %p\n",
                    entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = krealloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
            M_DRM, M_WAITOK | M_NULLOK);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        return 0;
}

/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
        int order, ret;

        if (request->count < 0 || request->count > 4096)
                return EINVAL;

        order = order_base_2(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return EINVAL;

        spin_lock(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                spin_unlock(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                spin_unlock(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_agp(dev, request);

        spin_unlock(&dev->dma_lock);

        return ret;
}

static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
        int order, ret;

        if (!capable(CAP_SYS_ADMIN))
                return EACCES;

        if (request->count < 0 || request->count > 4096)
                return EINVAL;

        order = order_base_2(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return EINVAL;

        spin_lock(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                spin_unlock(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                spin_unlock(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_sg(dev, request);

        spin_unlock(&dev->dma_lock);

        return ret;
}

int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
        int order, ret;

        if (!capable(CAP_SYS_ADMIN))
                return EACCES;

        if (request->count < 0 || request->count > 4096)
                return EINVAL;

        order = order_base_2(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return EINVAL;

        spin_lock(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                spin_unlock(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                spin_unlock(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_pci(dev, request);

        spin_unlock(&dev->dma_lock);

        return ret;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
                struct drm_file *file_priv)
{
        struct drm_buf_desc *request = data;
        int err;

        if (request->flags & _DRM_AGP_BUFFER)
                err = drm_addbufs_agp(dev, request);
        else if (request->flags & _DRM_SG_BUFFER)
                err = drm_addbufs_sg(dev, request);
        else
                err = drm_addbufs_pci(dev, request);

        return err;
}

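/*
 * Userspace view (sketch): DRM_IOCTL_ADD_BUFS carries a struct drm_buf_desc.
 * The count/size values and drm_fd below are assumptions made for the
 * example; on return the kernel rewrites count and size with what was
 * actually allocated (see the request->count/size updates in the helpers
 * above).
 *
 *      struct drm_buf_desc desc = {
 *              .count = 32,
 *              .size  = 65536,
 *              .flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *              .agp_start = 0,
 *      };
 *      if (ioctl(drm_fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *              printf("got %d buffers of %d bytes\n", desc.count, desc.size);
 */
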
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
                 struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_info *request = data;
        int i;
        int count;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->buf_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->buf_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->buf_lock);

        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                if (dma->bufs[i].buf_count)
                        ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request->count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                        if (dma->bufs[i].buf_count) {
                                struct drm_buf_desc __user *to =
                                    &request->list[count];
                                struct drm_buf_entry *from = &dma->bufs[i];
                                if (copy_to_user(&to->count,
                                                 &from->buf_count,
                                                 sizeof(from->buf_count)) ||
                                    copy_to_user(&to->size,
                                                 &from->buf_size,
                                                 sizeof(from->buf_size)) ||
                                    copy_to_user(&to->low_mark,
                                                 &from->low_mark,
                                                 sizeof(from->low_mark)) ||
                                    copy_to_user(&to->high_mark,
                                                 &from->high_mark,
                                                 sizeof(from->high_mark)))
                                        return -EFAULT;

                                DRM_DEBUG("%d %d %d %d %d\n",
                                          i,
                                          dma->bufs[i].buf_count,
                                          dma->bufs[i].buf_size,
                                          dma->bufs[i].low_mark,
                                          dma->bufs[i].high_mark);
                                ++count;
                        }
                }
        }
        request->count = count;

        return 0;
}

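/*
 * Userspace view (sketch): the handler above only copies descriptors out
 * when the caller-supplied count is large enough, and it always reports the
 * number of populated size orders, so clients typically probe twice with
 * DRM_IOCTL_INFO_BUFS (drm_fd is assumed to be an open DRM device node).
 *
 *      struct drm_buf_info info = { .count = 0, .list = NULL };
 *      ioctl(drm_fd, DRM_IOCTL_INFO_BUFS, &info);
 *      info.list = calloc(info.count, sizeof(*info.list));
 *      ioctl(drm_fd, DRM_IOCTL_INFO_BUFS, &info);
 */
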
/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
                 struct drm_file *file_priv)
{
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_desc *request = data;
        int order;
        struct drm_buf_entry *entry;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        DRM_DEBUG("%d, %d, %d\n",
                  request->size, request->low_mark, request->high_mark);
        order = order_base_2(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        entry = &dma->bufs[order];

        if (request->low_mark < 0 || request->low_mark > entry->buf_count)
                return -EINVAL;
        if (request->high_mark < 0 || request->high_mark > entry->buf_count)
                return -EINVAL;

        entry->low_mark = request->low_mark;
        entry->high_mark = request->high_mark;

        return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
                 struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        struct drm_buf_free *request = data;
        int i;
        int idx;
        drm_buf_t *buf;
        int retcode = 0;

        DRM_DEBUG("%d\n", request->count);

        spin_lock(&dev->dma_lock);
        for (i = 0; i < request->count; i++) {
                if (copy_from_user(&idx, &request->list[i], sizeof(idx))) {
                        retcode = EFAULT;
                        break;
                }
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                            idx, dma->buf_count - 1);
                        retcode = EINVAL;
                        break;
                }
                buf = dma->buflist[idx];
                if (buf->file_priv != file_priv) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                            DRM_CURRENTPID);
                        retcode = EINVAL;
                        break;
                }
                drm_free_buffer(dev, buf);
        }
        spin_unlock(&dev->dma_lock);

        return retcode;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
                struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        vm_offset_t address;
        struct vmspace *vms;
        vm_ooffset_t foff;
        vm_size_t size;
        vm_offset_t vaddr;
        struct drm_buf_map *request = data;
        int i;

        vms = DRM_CURPROC->td_proc->p_vmspace;

        spin_lock(&dev->dma_lock);
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock(&dev->dma_lock);

        if (request->count < dma->buf_count)
                goto done;

        if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
            (drm_core_check_feature(dev, DRIVER_SG) &&
            (dma->flags & _DRM_DMA_USE_SG))) {
                drm_local_map_t *map = dev->agp_buffer_map;

                if (map == NULL) {
                        retcode = EINVAL;
                        goto done;
                }
                size = round_page(map->size);
                foff = (unsigned long)map->handle;
        } else {
                size = round_page(dma->byte_count);
                foff = 0;
        }

        vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
            SLIST_FIRST(&dev->devnode->si_hlist), foff);
        if (retcode)
                goto done;

        request->virtual = (void *)vaddr;

        for (i = 0; i < dma->buf_count; i++) {
                if (copy_to_user(&request->list[i].idx,
                    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (copy_to_user(&request->list[i].total,
                    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (copy_to_user(&request->list[i].used, &zero,
                    sizeof(zero))) {
                        retcode = EFAULT;
                        goto done;
                }
                address = vaddr + dma->buflist[i]->offset; /* *** */
                if (copy_to_user(&request->list[i].address, &address,
                    sizeof(address))) {
                        retcode = EFAULT;
                        goto done;
                }
        }
 done:
        request->count = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

        return retcode;
}
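
/*
 * Userspace view (sketch, using the legacy interface names): the caller
 * passes a list array with at least as many slots as there are DMA buffers;
 * the handler above mmap()s the whole region and fills in per-buffer
 * index, size and address.  bufcount and drm_fd are assumptions for the
 * example; a client would have learned the count from DRM_IOCTL_ADD_BUFS
 * or DRM_IOCTL_INFO_BUFS.
 *
 *      struct drm_buf_map bmap = {
 *              .count = bufcount,
 *              .list  = calloc(bufcount, sizeof(struct drm_buf_pub)),
 *      };
 *      if (ioctl(drm_fd, DRM_IOCTL_MAP_BUFS, &bmap) == 0)
 *              printf("buffers mapped at %p\n", bmap.virtual);
 */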