/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include "bus/pci/pcireg.h"

#include "dev/drm/drmP.h"

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	DRM_UNLOCK();
	if (dev->pcir[resource] != NULL) {
		DRM_LOCK();
		return 0;
	}

	dev->pcirid[resource] = PCIR_BAR(resource);
	dev->pcir[resource] = bus_alloc_resource_any(dev->device,
	    SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
	DRM_LOCK();

	if (dev->pcir[resource] == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}

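/* Create a DRM mapping of the given type and register it on dev->maplist.
 * On success *map_ptr points at the new map (or at a matching
 * kernel-allocated map that already exists).  Called with the DRM lock
 * held; the lock is dropped and retaken around the allocations below.
 */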
int drm_addmap(struct drm_device *dev, unsigned long offset,
	       unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (size == 0) {
		DRM_ERROR("size is 0: 0x%lx/0x%lx\n", offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (!map) {
		DRM_LOCK();
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
		map->handle = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				free(map->handle, DRM_MEM_MAPS);
				free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EINVAL;
		}
		map->offset += dev->sg->handle;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, DRM_MEM_MAPS);
		DRM_LOCK();
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}

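/* ioctl wrapper around drm_addmap().  The device must have been opened for
 * both reading and writing, and only root may create maps of any type other
 * than _DRM_AGP.
 */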
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK();
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK();
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = map->handle;

	if (request->type != _DRM_SHM) {
		request->handle = (void *)request->offset;
	}

	return 0;
}

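/* Tear down a single map: unlink it from dev->maplist and release whatever
 * type-specific resources (ioremap, MTRR, SHM allocation, consistent DMA
 * memory) it holds.  The caller must hold dev->dev_lock, as asserted below.
 */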
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	if (map == NULL)
		return;

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->handle, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	free(map, DRM_MEM_MAPS);
}

/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_local_map_t *map;
	struct drm_map *request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return EINVAL;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return 0;
}

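/* Error unwind for the addbufs paths: free any DMA segments and per-buffer
 * private data already allocated for this order's entry, then reset its
 * counts so a later attempt starts clean.
 */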
static void drm_cleanup_buf_error(struct drm_device *dev,
				  drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

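/* Carve `count` buffers of 2^order bytes each out of the AGP aperture,
 * starting at request->agp_start.  No DMA memory is allocated here; buffer
 * addresses are computed directly from the aperture offset, and only the
 * per-buffer dev_private metadata is malloc'd.
 */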
static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid;*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	/* Make sure buffers are located in AGP memory that we own */

	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

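/* Allocate `count` buffers backed by DMA-able memory from drm_pci_alloc(),
 * one segment of 2^page_order pages per allocation.  The existing pagelist
 * is kept intact until every allocation has succeeded, so a failure can
 * unwind without corrupting dma->pagelist.
 */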
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		free(temp_pagelist, DRM_MEM_PAGES);
		free(entry->seglist, DRM_MEM_SEGS);
		free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		DRM_SPINLOCK(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

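/* Scatter/gather analogue of the AGP path: buffers are laid out at offsets
 * relative to the SG area, and their kernel virtual addresses are derived
 * from dev->sg->handle rather than the AGP aperture base.
 */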
static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

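/* The three drm_addbufs_* entry points below share the same guard sequence:
 * sanity-check count and order, take dma_lock, refuse new allocations once
 * any buffer-using ioctl has run (dev->buf_use != 0), and allow at most one
 * allocation per order before handing off to the matching drm_do_addbufs_*
 * worker.  The SG and PCI variants additionally require root.
 */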
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}

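/* Report the per-order buffer pools to user space.  Bumping dev->buf_use
 * here permanently disables further addbufs calls for this device.
 */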
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}

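/* Set the freelist low/high watermarks for the pool matching request->size.
 * Both marks must fit within the number of buffers actually allocated for
 * that order.
 */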
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}

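/* Return a list of buffers, identified by index, to the free pool.  Each
 * buffer must belong to the calling file, or the request is rejected.
 */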
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}

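/* Map every DMA buffer into the calling process with a single vm_mmap() of
 * either the AGP/SG aperture map or the PCI DMA area, then copy each
 * buffer's index, size, and computed user address out to request->list.
 */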
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
	    dev->devnode, foff);
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
#endif
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

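/* Contract sketch: drm_order() returns the smallest order such that
 * (1UL << order) >= size, e.g. drm_order(4096) == 12 and
 * drm_order(4097) == 13.  drm_order(0) is defined as 0.
 */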
/*
 * Compute order.  Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		++order;

	return order;
}