/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $DragonFly: src/sys/dev/drm/drm_bufs.c,v 1.1 2008/04/05 18:12:29 hasso Exp $
 */
/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#ifdef __DragonFly__
#include <bus/pci/pcireg.h>
#else
#include "dev/pci/pcireg.h"
#endif

#include "drmP.h"
/*
 * Compute order.  Can be made faster.
 */
int drm_order(unsigned long size)
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size; tmp >>= 1; ++order)
                ;

        if (size & ~(1 << order))
                ++order;

        return order;
}
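
/* Example: drm_order(4096) == 12 and drm_order(4097) == 13, i.e. the
 * smallest order such that (1 << order) >= size, for size > 0.
 */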
/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(drm_device_t *dev, int resource)
{
        if (resource >= DRM_MAX_PCI_RESOURCE) {
                DRM_ERROR("Resource %d too large\n", resource);
                return 1;
        }

        DRM_UNLOCK();
        if (dev->pcir[resource] != NULL) {
                DRM_LOCK();
                return 0;
        }

        dev->pcirid[resource] = PCIR_BAR(resource);
        dev->pcir[resource] = bus_alloc_resource_any(dev->device,
            SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
        DRM_LOCK();

        if (dev->pcir[resource] == NULL) {
                DRM_ERROR("Couldn't find resource 0x%x\n", resource);
                return 1;
        }

        return 0;
}
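
/* Return the bus start address (or length) of PCI BAR 'resource',
 * allocating the resource on first use.  Both helpers return 0 on failure.
 */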
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
        if (drm_alloc_resource(dev, resource) != 0)
                return 0;

        return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
        if (drm_alloc_resource(dev, resource) != 0)
                return 0;

        return rman_get_size(dev->pcir[resource]);
}
int drm_addmap(drm_device_t *dev, unsigned long offset, unsigned long size,
    drm_map_type_t type, drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
        drm_local_map_t *map;
        int align;
        /*drm_agp_mem_t *entry;
        int valid;*/

        /* Only allow shared memory to be removable since we only keep enough
         * book keeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
                DRM_ERROR("Requested removable map for non-DRM_SHM\n");
                return EINVAL;
        }
        if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
                DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
                    offset, size);
                return EINVAL;
        }
        if (offset + size < offset) {
                DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
                    offset, size);
                return EINVAL;
        }

        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
            size, type);

        /* Check if this is just another version of a kernel-allocated map, and
         * just hand that back if so.
         */
        if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
            type == _DRM_SHM) {
                TAILQ_FOREACH(map, &dev->maplist, link) {
                        if (map->type == type && (map->offset == offset ||
                            (map->type == _DRM_SHM &&
                            map->flags == _DRM_CONTAINS_LOCK))) {
                                map->size = size;
                                DRM_DEBUG("Found kernel map %d\n", type);
                                goto done;
                        }
                }
        }
        DRM_UNLOCK();

        /* Allocate a new map structure, fill it in, and do any type-specific
         * initialization necessary.
         */
        map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
        if (!map)
                return ENOMEM;

        map->offset = offset;
        map->size = size;
        map->type = type;
        map->flags = flags;

        switch (map->type) {
        case _DRM_REGISTERS:
                map->handle = drm_ioremap(dev, map);
                if (!(map->flags & _DRM_WRITE_COMBINING))
                        break;
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
                        map->mtrr = 1;
                break;
        case _DRM_SHM:
                map->handle = malloc(map->size, M_DRM, M_NOWAIT);
                DRM_DEBUG("%lu %d %p\n",
                    map->size, drm_order(map->size), map->handle);
                if (!map->handle) {
                        free(map, M_DRM);
                        return ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        DRM_LOCK();
                        if (dev->lock.hw_lock != NULL) {
                                DRM_UNLOCK();
                                free(map->handle, M_DRM);
                                free(map, M_DRM);
                                return EBUSY;
                        }
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                        DRM_UNLOCK();
                }
                break;
        case _DRM_AGP:
                /*valid = 0;*/
                /* In some cases (i810 driver), user space may have already
                 * added the AGP base itself, because dev->agp->base previously
                 * only got set during AGP enable.  So, only add the base
                 * address if the map's offset isn't already within the
                 * aperture.
                 */
                if (map->offset < dev->agp->base ||
                    map->offset > dev->agp->base +
                    dev->agp->info.ai_aperture_size - 1) {
                        map->offset += dev->agp->base;
                }
                map->mtrr = dev->agp->mtrr; /* for getmap */
                /*for (entry = dev->agp->memory; entry; entry = entry->next) {
                        if ((map->offset >= entry->bound) &&
                            (map->offset + map->size <=
                            entry->bound + entry->pages * PAGE_SIZE)) {
                                valid = 1;
                                break;
                        }
                }
                if (!valid) {
                        free(map, M_DRM);
                        return EACCES;
                }*/
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        free(map, M_DRM);
                        return EINVAL;
                }
                map->offset = map->offset + dev->sg->handle;
                break;
        case _DRM_CONSISTENT:
                /* Unfortunately, we don't get any alignment specification from
                 * the caller, so we have to guess.  drm_pci_alloc requires
                 * a power-of-two alignment, so try to align the bus address of
                 * the map to its size if possible, otherwise just assume
                 * PAGE_SIZE alignment.
                 */
                align = map->size;
                if ((align & (align - 1)) != 0)
                        align = PAGE_SIZE;
                map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
                if (map->dmah == NULL) {
                        free(map, M_DRM);
                        return ENOMEM;
                }
                map->handle = map->dmah->vaddr;
                map->offset = map->dmah->busaddr;
                break;
        default:
                DRM_ERROR("Bad map type %d\n", map->type);
                free(map, M_DRM);
                return EINVAL;
        }

        DRM_LOCK();
        TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
        /* Jumped to, with lock held, when a kernel map is found. */
        DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
            map->size);

        *map_ptr = map;

        return 0;
}
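
/* Ioctl entry point for drm_addmap: checks that the file was opened
 * read/write and that non-root callers only create AGP maps, then copies
 * the resulting map description back into the user's request.
 */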
int drm_addmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
        drm_map_t *request = data;
        drm_local_map_t *map;
        int err;

        if (!(dev->flags & (FREAD|FWRITE)))
                return EACCES; /* Require read/write */

        if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
                return EACCES;

        DRM_LOCK();
        err = drm_addmap(dev, request->offset, request->size, request->type,
            request->flags, &map);
        DRM_UNLOCK();
        if (err != 0)
                return err;

        request->offset = map->offset;
        request->size = map->size;
        request->type = map->type;
        request->flags = map->flags;
        request->mtrr = map->mtrr;
        request->handle = map->handle;

        if (request->type != _DRM_SHM) {
                request->handle = (void *)request->offset;
        }

        return 0;
}
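
/* Remove a map from the list and release its backing resources.  The
 * caller must hold dev_lock; DRM_SPINLOCK_ASSERT below checks that.
 */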
void drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        TAILQ_REMOVE(&dev->maplist, map, link);

        switch (map->type) {
        case _DRM_REGISTERS:
                if (map->bsr == NULL)
                        drm_ioremapfree(map);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (map->mtrr) {
                        int __unused retcode;

                        retcode = drm_mtrr_del(0, map->offset, map->size,
                            DRM_MTRR_WC);
                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                }
                break;
        case _DRM_SHM:
                free(map->handle, M_DRM);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                drm_pci_free(dev, map->dmah);
                break;
        default:
                DRM_ERROR("Bad map type %d\n", map->type);
                break;
        }

        if (map->bsr != NULL) {
                bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
                    map->bsr);
        }

        free(map, M_DRM);
}
/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */
int drm_rmmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
        drm_local_map_t *map;
        drm_map_t *request = data;

        DRM_LOCK();
        TAILQ_FOREACH(map, &dev->maplist, link) {
                if (map->handle == request->handle &&
                    map->flags & _DRM_REMOVABLE)
                        break;
        }

        /* No match found. */
        if (map == NULL) {
                DRM_UNLOCK();
                return EINVAL;
        }

        drm_rmmap(dev, map);

        DRM_UNLOCK();

        return 0;
}
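
/* Undo a partially completed addbufs allocation: release the DMA
 * segments and per-buffer private data recorded in 'entry' so far.
 */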
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        drm_pci_free(dev, entry->seglist[i]);
                }
                free(entry->seglist, M_DRM);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        free(entry->buflist[i].dev_private, M_DRM);
                }
                free(entry->buflist, M_DRM);

                entry->buf_count = 0;
        }
}
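
/* Carve DMA buffers out of the AGP aperture at request->agp_start.
 * Sizes are rounded up to a power of two: e.g. with PAGE_SIZE 4096, a
 * request->size of 65536 yields order 16, size 65536, page_order 4 and
 * total 65536 == PAGE_SIZE << page_order.
 */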
static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        /*drm_agp_mem_t *agp_entry;
        int valid;*/
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        /* Make sure buffers are located in AGP memory that we own */
        /* Breaks MGA due to drm_alloc_agp not setting up entries for the
         * memory.  Safe to ignore for now because these ioctls are still
         * root-only.
         */
        /*valid = 0;
        for (agp_entry = dev->agp->memory; agp_entry;
            agp_entry = agp_entry->next) {
                if ((agp_offset >= agp_entry->bound) &&
                    (agp_offset + total * count <=
                    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
                        valid = 1;
                        break;
                }
        }
        if (!valid) {
                DRM_DEBUG("zone invalid\n");
                return EINVAL;
        }*/

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
            M_NOWAIT | M_ZERO);
        if (!entry->buflist) {
                return ENOMEM;
        }

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver.buf_priv_size;
                buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                    M_NOWAIT | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return ENOMEM;
                }

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
            M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        return 0;
}
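
/* Allocate DMA buffers from ordinary system memory via drm_pci_alloc.
 * A new page list is staged in temp_pagelist and only replaces
 * dma->pagelist once every allocation has succeeded.
 */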
static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
            request->count, request->size, size, order);

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
            M_NOWAIT | M_ZERO);
        entry->seglist = malloc(count * sizeof(*entry->seglist), M_DRM,
            M_NOWAIT | M_ZERO);

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = malloc((dma->page_count + (count << page_order)) *
            sizeof(*dma->pagelist), M_DRM, M_NOWAIT);

        if (entry->buflist == NULL || entry->seglist == NULL ||
            temp_pagelist == NULL) {
                /* Free whichever of the three allocations did succeed. */
                free(temp_pagelist, M_DRM);
                free(entry->buflist, M_DRM);
                free(entry->seglist, M_DRM);
                return ENOMEM;
        }

        memcpy(temp_pagelist, dma->pagelist, dma->page_count *
            sizeof(*dma->pagelist));

        DRM_DEBUG("pagelist: %d entries\n",
            dma->page_count + (count << page_order));

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while (entry->buf_count < count) {
                drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
                    0xfffffffful);
                if (dmah == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        free(temp_pagelist, M_DRM);
                        return ENOMEM;
                }

                entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ %p\n",
                            dma->page_count + page_count,
                            (char *)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++] =
                            (long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                    offset + size <= total && entry->buf_count < count;
                    offset += alignment, ++entry->buf_count) {
                        buf = &entry->buflist[entry->buf_count];
                        buf->idx = dma->buf_count + entry->buf_count;
                        buf->total = alignment;
                        buf->order = order;
                        buf->used = 0;
                        buf->offset = (dma->byte_count + byte_count + offset);
                        buf->address = ((char *)dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next = NULL;
                        buf->pending = 0;
                        buf->file_priv = NULL;

                        buf->dev_priv_size = dev->driver.buf_priv_size;
                        buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                            M_NOWAIT | M_ZERO);
                        if (buf->dev_private == NULL) {
                                /* Set count correctly so we free the proper
                                 * amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                free(temp_pagelist, M_DRM);
                                return ENOMEM;
                        }

                        DRM_DEBUG("buffer %d @ %p\n",
                            entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
            M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                free(temp_pagelist, M_DRM);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        free(dma->pagelist, M_DRM);
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        request->count = entry->buf_count;
        request->size = size;

        return 0;
}
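
/* Allocate DMA buffers inside a previously created scatter/gather area
 * (dev->sg); here agp_offset is just request->agp_start, an offset
 * relative to the start of that area.
 */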
static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %ld\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
            M_NOWAIT | M_ZERO);
        if (entry->buflist == NULL)
                return ENOMEM;

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next = NULL;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver.buf_priv_size;
                buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
                    M_NOWAIT | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return ENOMEM;
                }

                DRM_DEBUG("buffer %d @ %p\n",
                    entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
            M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        return 0;
}
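
/* The drm_addbufs_* wrappers below take dev->dma_lock, validate the
 * request (count at most 4096, order within [DRM_MIN_ORDER, DRM_MAX_ORDER],
 * no allocations once dev->buf_use is set, and at most one allocation per
 * order), then hand off to the matching drm_do_addbufs_* helper.
 */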
int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        int order, ret;

        DRM_SPINLOCK(&dev->dma_lock);

        if (request->count < 0 || request->count > 4096) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EINVAL;
        }

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EINVAL;
        }

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_agp(dev, request);

        DRM_SPINUNLOCK(&dev->dma_lock);

        return ret;
}
int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        int order, ret;

        DRM_SPINLOCK(&dev->dma_lock);

        if (!DRM_SUSER(DRM_CURPROC)) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EACCES;
        }

        if (request->count < 0 || request->count > 4096) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EINVAL;
        }

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EINVAL;
        }

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_sg(dev, request);

        DRM_SPINUNLOCK(&dev->dma_lock);

        return ret;
}
int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        int order, ret;

        DRM_SPINLOCK(&dev->dma_lock);

        if (!DRM_SUSER(DRM_CURPROC)) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EACCES;
        }

        if (request->count < 0 || request->count > 4096) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EINVAL;
        }

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EINVAL;
        }

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_pci(dev, request);

        DRM_SPINUNLOCK(&dev->dma_lock);

        return ret;
}
int drm_addbufs_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
        drm_buf_desc_t *request = data;
        int err;

        if (request->flags & _DRM_AGP_BUFFER)
                err = drm_addbufs_agp(dev, request);
        else if (request->flags & _DRM_SG_BUFFER)
                err = drm_addbufs_sg(dev, request);
        else
                err = drm_addbufs_pci(dev, request);

        return err;
}
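
/* Report the per-order buffer pools to user space.  This bumps
 * dev->buf_use, which locks out further addbufs allocations.
 */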
int drm_infobufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t *request = data;
        int i;
        int count;
        int retcode = 0;

        DRM_SPINLOCK(&dev->dma_lock);
        ++dev->buf_use;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                if (dma->bufs[i].buf_count)
                        ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request->count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                        if (dma->bufs[i].buf_count) {
                                drm_buf_desc_t from;

                                from.count = dma->bufs[i].buf_count;
                                from.size = dma->bufs[i].buf_size;
                                from.low_mark = dma->bufs[i].freelist.low_mark;
                                from.high_mark = dma->bufs[i].freelist.high_mark;

                                if (DRM_COPY_TO_USER(&request->list[count], &from,
                                    sizeof(drm_buf_desc_t)) != 0) {
                                        retcode = EFAULT;
                                        break;
                                }

                                DRM_DEBUG("%d %d %d %d %d\n",
                                    i,
                                    dma->bufs[i].buf_count,
                                    dma->bufs[i].buf_size,
                                    dma->bufs[i].freelist.low_mark,
                                    dma->bufs[i].freelist.high_mark);
                                ++count;
                        }
                }
        }
        request->count = count;

        return retcode;
}
int drm_markbufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t *request = data;
        int order;

        DRM_DEBUG("%d, %d, %d\n",
            request->size, request->low_mark, request->high_mark);

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
            request->low_mark < 0 || request->high_mark < 0) {
                return EINVAL;
        }

        DRM_SPINLOCK(&dev->dma_lock);
        if (request->low_mark > dma->bufs[order].buf_count ||
            request->high_mark > dma->bufs[order].buf_count) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EINVAL;
        }

        dma->bufs[order].freelist.low_mark = request->low_mark;
        dma->bufs[order].freelist.high_mark = request->high_mark;
        DRM_SPINUNLOCK(&dev->dma_lock);

        return 0;
}
int drm_freebufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t *request = data;
        int i;
        int idx;
        drm_buf_t *buf;
        int retcode = 0;

        DRM_DEBUG("%d\n", request->count);

        DRM_SPINLOCK(&dev->dma_lock);
        for (i = 0; i < request->count; i++) {
                if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
                        retcode = EFAULT;
                        break;
                }
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                            idx, dma->buf_count - 1);
                        retcode = EINVAL;
                        break;
                }
                buf = dma->buflist[idx];
                if (buf->file_priv != file_priv) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                            DRM_CURRENTPID);
                        retcode = EINVAL;
                        break;
                }
                drm_free_buffer(dev, buf);
        }
        DRM_SPINUNLOCK(&dev->dma_lock);

        return retcode;
}
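
/* Map the whole DMA buffer pool into the calling process and copy out
 * each buffer's index, size and user address.  For AGP/SG DMA the file
 * offset comes from dev->agp_buffer_map; otherwise the buffers are
 * mapped from offset 0 of the device node.
 */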
int drm_mapbufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        vm_offset_t address;
        struct vmspace *vms;
#if defined(__FreeBSD__) || defined(__DragonFly__)
        vm_ooffset_t foff;
        vm_size_t size;
        vm_offset_t vaddr;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
        struct vnode *vn;
        voff_t foff;
        vsize_t size;
        vaddr_t vaddr;
#endif /* __NetBSD__ || __OpenBSD__ */

        drm_buf_map_t *request = data;
        int i;

#if defined(__NetBSD__) || defined(__OpenBSD__)
        if (!vfinddev(kdev, VCHR, &vn))
                return 0;       /* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ || __OpenBSD__ */

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 || defined(__DragonFly__)
        vms = DRM_CURPROC->td_proc->p_vmspace;
#else
        vms = DRM_CURPROC->p_vmspace;
#endif

        DRM_SPINLOCK(&dev->dma_lock);
        dev->buf_use++;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        if (request->count < dma->buf_count)
                goto done;

        if ((dev->driver.use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
            (dev->driver.use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
                drm_local_map_t *map = dev->agp_buffer_map;

                if (map == NULL) {
                        retcode = EINVAL;
                        goto done;
                }
                size = round_page(map->size);
                foff = map->offset;
        } else {
                size = round_page(dma->byte_count);
                foff = 0;
        }

#if defined(__FreeBSD__) || defined(__DragonFly__)
        vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, dev->devnode, foff);
#else
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&dev->devnode->si_hlist),
            foff);
#endif
#elif defined(__NetBSD__) || defined(__OpenBSD__)
        vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
        retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
            UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
            &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ || __OpenBSD__ */
        if (retcode)
                goto done;

        request->virtual = (void *)vaddr;

        for (i = 0; i < dma->buf_count; i++) {
                if (DRM_COPY_TO_USER(&request->list[i].idx,
                    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request->list[i].total,
                    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
                    sizeof(zero))) {
                        retcode = EFAULT;
                        goto done;
                }
                address = vaddr + dma->buflist[i]->offset;      /* *** */
                if (DRM_COPY_TO_USER(&request->list[i].address, &address,
                    sizeof(address))) {
                        retcode = EFAULT;
                        goto done;
                }
        }

done:
        request->count = dma->buf_count;

        DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

        return retcode;
}