/*
 * drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 */
/*
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009, Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "drmP.h"
#include <sys/gfx_private.h>
#include "drm_io32.h"

#define	PAGE_MASK	(PAGE_SIZE - 1)
#define	round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)

/*
 * Compute order.  Can be made faster.
 */
int
drm_order(unsigned long size)
{
	int order = 0;
	unsigned long tmp = size;

	while (tmp >>= 1)
		order++;

	if (size & ~(1 << order))
		++order;

	return (order);
}
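
/*
 * Look up an existing map of the given type.  A map matches if its offset
 * matches, or, for an _DRM_SHM map whose flags are exactly
 * _DRM_CONTAINS_LOCK, regardless of offset (only one lock-bearing SHM map
 * is ever created).
 */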
static inline drm_local_map_t *
drm_find_map(drm_device_t *dev, uoff_t offset, int type)
{
	drm_local_map_t *map;

	TAILQ_FOREACH(map, &dev->maplist, link) {
		if ((map->type == type) && ((map->offset == offset) ||
		    ((map->flags == _DRM_CONTAINS_LOCK) &&
		    (map->type == _DRM_SHM))))
			return (map);
	}

	return (NULL);
}
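
/*
 * Register a mapping of the given type with the device.  If an equivalent
 * kernel-allocated map already exists it is reused; otherwise a new
 * drm_local_map_t is allocated, initialized according to its type and
 * appended to dev->maplist.  On success *map_ptr points at the map.
 */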
int
drm_addmap(drm_device_t *dev, unsigned long offset,
    unsigned long size, drm_map_type_t type,
    drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	caddr_t kva;
	int retval;

	/*
	 * Only allow shared memory to be removable since we only keep
	 * enough book keeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM)
		return (EINVAL);
	if ((offset & PAGE_MASK) || (size & PAGE_MASK))
		return (EINVAL);
	if (offset + size < offset)
		return (EINVAL);

	/*
	 * Check if this is just another version of a kernel-allocated
	 * map, and just hand that back if so.
	 */
	map = drm_find_map(dev, offset, type);
	if (map != NULL) {
		goto done;
	}

	/*
	 * Allocate a new map structure, fill it in, and do any
	 * type-specific initialization necessary.
	 */
	map = drm_alloc(sizeof (*map), DRM_MEM_MAPS);
	if (!map)
		return (ENOMEM);

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		retval = drm_ioremap(dev, map);
		if (retval) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (retval);
		}
		break;

	case _DRM_SHM:
		/*
		 * ddi_umem_alloc() grants page-aligned memory.  We needn't
		 * handle the alignment issue here.
		 */
		map->handle = ddi_umem_alloc(map->size,
		    DDI_UMEM_NOSLEEP, &map->drm_umem_cookie);
		if (!map->handle) {
			DRM_ERROR("drm_addmap: ddi_umem_alloc failed");
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (ENOMEM);
		}
		/*
		 * Record only the low 32 bits of this handle, since a 32-bit
		 * user application is incapable of passing in a 64-bit
		 * offset when doing mmap.
		 */
		map->offset = (uintptr_t)map->handle;
		map->offset &= 0xffffffffUL;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				ddi_umem_free(map->drm_umem_cookie);
				drm_free(map, sizeof (*map), DRM_MEM_MAPS);
				return (EBUSY);
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		map->dev_addr = map->handle;
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			return (EINVAL);
		}
		map->offset += (uintptr_t)dev->sg->virtual;
		map->handle = (void *)(uintptr_t)map->offset;
		map->dev_addr = dev->sg->virtual;
		map->dev_handle = dev->sg->dmah_sg->acc_hdl;
		break;

	case _DRM_CONSISTENT:
		DRM_ERROR("%d DRM_AGP_CONSISTENT", __LINE__);
		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
		return (ENOTSUP);
	case _DRM_AGP:
		map->offset += dev->agp->base;
		kva = gfxp_map_kernel_space(map->offset, map->size,
		    GFXP_MEMORY_WRITECOMBINED);
		if (kva == 0) {
			drm_free(map, sizeof (*map), DRM_MEM_MAPS);
			cmn_err(CE_WARN,
			    "drm_addmap: failed to map AGP aperture");
			return (ENOMEM);
		}
		map->handle = (void *)(uintptr_t)kva;
		map->dev_addr = kva;
		break;
	default:
		drm_free(map, sizeof (*map), DRM_MEM_MAPS);
		return (EINVAL);
	}

	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */
	*map_ptr = map;

	return (0);
}

/*ARGSUSED*/
int
drm_addmap_ioctl(DRM_IOCTL_ARGS)
{
	drm_map_t request;
	drm_local_map_t *map;
	int err;
	DRM_DEVICE;
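
	/*
	 * A 32-bit client passes a drm_map_32_t; widen it into the native
	 * drm_map_t before calling drm_addmap(), and narrow the result again
	 * on the way back out.
	 */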
#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;

		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.mtrr = request32.mtrr;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	err = drm_addmap(dev, request.offset, request.size, request.type,
	    request.flags, &map);

	if (err != 0)
		return (err);

	request.offset = map->offset;
	request.size = map->size;
	request.type = map->type;
	request.flags = map->flags;
	request.mtrr = map->mtrr;
	request.handle = (uintptr_t)map->handle;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;

		request32.offset = request.offset;
		request32.size = (uint32_t)request.size;
		request32.type = request.type;
		request32.flags = request.flags;
		request32.handle = request.handle;
		request32.mtrr = request.mtrr;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else
#endif
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));

	return (0);
}
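
/*
 * Remove a map from the device map list and release the resources backing
 * it.  The caller must hold dev->dev_lock.
 */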
void
drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map);
		break;
	case _DRM_FRAME_BUFFER:
		drm_ioremapfree(map);
		break;
	case _DRM_SHM:
		ddi_umem_free(map->drm_umem_cookie);
		break;
	case _DRM_AGP:
		/*
		 * We mapped the AGP aperture into kernel space in drm_addmap;
		 * unmap it here and release the kernel virtual address space.
		 */
		gfxp_unmap_kernel_space(map->dev_addr, map->size);
		break;

	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		break;
	default:
		break;
	}

	drm_free(map, sizeof (*map), DRM_MEM_MAPS);
}

/*
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 */
/*ARGSUSED*/
int
drm_rmmap_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_local_map_t *map;
	drm_map_t request;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_map_32_t request32;

		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (drm_map_32_t));
		request.offset = request32.offset;
		request.size = request32.size;
		request.type = request32.type;
		request.flags = request32.flags;
		request.handle = request32.handle;
		request.mtrr = request32.mtrr;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));
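
	/*
	 * Compare only the low 32 bits of the handle so that handles passed
	 * back by 32-bit clients still match; only maps marked
	 * _DRM_REMOVABLE may be removed from userland.
	 */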
	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (((uintptr_t)map->handle ==
		    (request.handle & 0xffffffff)) &&
		    (map->flags & _DRM_REMOVABLE))
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return (EINVAL);
	}

	drm_rmmap(dev, map);
	DRM_UNLOCK();

	return (0);
}
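
/*
 * Unwind a partially completed buffer allocation: release any per-buffer
 * private data and free the seglist and buflist arrays themselves.
 */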
/*ARGSUSED*/
static void
drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				DRM_ERROR(
				    "drm_cleanup_buf_error: not implemented");
			}
		}
		drm_free(entry->seglist,
		    entry->seg_count *
		    sizeof (*entry->seglist), DRM_MEM_SEGS);
		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
				    entry->buflist[i].dev_priv_size,
				    DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
		    entry->buf_count *
		    sizeof (*entry->buflist), DRM_MEM_BUFS);
		entry->buflist = NULL;
		entry->buf_count = 0;
	}
}

/*ARGSUSED*/
int
drm_markbufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_markbufs");
	return (EINVAL);
}

/*ARGSUSED*/
int
drm_infobufs(DRM_IOCTL_ARGS)
{
	DRM_DEBUG("drm_infobufs");
	return (EINVAL);
}

static int
drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t **temp_buflist;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int byte_count;
	int i;

	if (!dma)
		return (EINVAL);

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	entry = &dma->bufs[order];

	/* No more than one allocation per order */
	if (entry->buf_count) {
		return (ENOMEM);
	}

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (!entry->buflist) {
		return (ENOMEM);
	}
	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
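
	/*
	 * Carve the AGP region into "count" buffers spaced "alignment" bytes
	 * apart, recording both the bus address within the aperture and the
	 * offset within the device's DMA space.
	 */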
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (ENOMEM);
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_alloc(
	    (dma->buf_count + entry->buf_count) * sizeof (*dma->buflist),
	    DRM_MEM_BUFS);

	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		DRM_ERROR("temp_buflist is NULL");
		return (ENOMEM);
	}

	bcopy(dma->buflist, temp_buflist,
	    dma->buf_count * sizeof (*dma->buflist));
	kmem_free(dma->buflist, dma->buf_count * sizeof (*dma->buflist));
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return (0);
}

static int
drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;

	byte_count = 0;
	agp_offset = request->agp_start;
	entry = &dma->bufs[order];

	entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
	    DRM_MEM_BUFS);
	if (entry->buflist == NULL)
		return (ENOMEM);

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
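
	/*
	 * Same carving scheme as the AGP case, except that buf->address is
	 * offset by the scatter/gather mapping's handle rather than pointing
	 * into the AGP aperture.
	 */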
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->pending = 0;
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return (ENOMEM);
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof (*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof (*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		drm_cleanup_buf_error(dev, entry);
		return (ENOMEM);
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;
	request->count = entry->buf_count;
	request->size = size;
	dma->flags = _DRM_DMA_USE_SG;

	return (0);
}

int
drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EBUSY);
	}

	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (ENOMEM);
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return (ret);
}

int
drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	int order, ret;

	DRM_SPINLOCK(&dev->dma_lock);

	if (request->count < 0 || request->count > 4096) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EINVAL);
	}

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (EBUSY);
	}

	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return (ENOMEM);
	}

	ret = drm_do_addbufs_sg(dev, request);
	DRM_SPINUNLOCK(&dev->dma_lock);
	return (ret);
}

/*ARGSUSED*/
int
drm_addbufs_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_desc_t request;
	int err;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc_32_t request32;

		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.size = request32.size;
		request.low_mark = request32.low_mark;
		request.high_mark = request32.high_mark;
		request.flags = request32.flags;
		request.agp_start = request32.agp_start;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	if (request.flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, &request);
	else if (request.flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, &request);
	else
		err = EINVAL;	/* neither buffer type was requested */

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_desc_32_t request32;

		request32.count = request.count;
		request32.size = request.size;
		request32.low_mark = request.low_mark;
		request32.high_mark = request.high_mark;
		request32.flags = request.flags;
		request32.agp_start = (uint32_t)request.agp_start;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else
#endif
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));

	return (err);
}

/*ARGSUSED*/
int
drm_freebufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_buf_free_32_t request32;

		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.list = (int *)(uintptr_t)request32.list;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));
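
	/*
	 * Validate each index supplied by the caller and release the
	 * corresponding buffer; stop at the first bad index or copy error.
	 */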
	for (i = 0; i < request.count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof (idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("drm_freebufs: Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->filp != fpriv) {
			DRM_ERROR(
			    "drm_freebufs: process %d does not own the buffer.\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}

	return (retcode);
}

#ifdef	_LP64
extern caddr_t smmap64(caddr_t, size_t, int, int, int, off_t);
#define	drm_smmap	smmap64
#else
#error	"No define for _LP64"
#endif
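
/*
 * smmap64() is the kernel's LP64 mmap(2) entry point.  drm_mapbufs() calls
 * it directly on behalf of the current process so that the buffer area
 * described by request.fd ends up mapped into the caller's address space.
 */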

/*ARGSUSED*/
int
drm_mapbufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_map_t request;
	const int zero = 0;
	unsigned long vaddr;
	unsigned long address;
	drm_device_dma_t *dma = dev->dma;
	uint_t size;
	uint_t foff;
	int ret_tmp;
	int i;

#ifdef	_MULTI_DATAMODEL
	drm_buf_map_32_t request32;
	drm_buf_pub_32_t *list32;
	uint_t address32;

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		DRM_COPYFROM_WITH_RETURN(&request32,
		    (void *)data, sizeof (request32));
		request.count = request32.count;
		request.virtual = (void *)(uintptr_t)request32.virtual;
		request.list = (drm_buf_pub_t *)(uintptr_t)request32.list;
		request.fd = request32.fd;
	} else
#endif
		DRM_COPYFROM_WITH_RETURN(&request,
		    (void *)data, sizeof (request));

	dev->buf_use++;

	if (request.count < dma->buf_count)
		goto done;

	if ((dev->driver->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (dev->driver->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL)
			return (EINVAL);
		size = round_page(map->size);
		foff = (uintptr_t)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}
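
	/*
	 * Map the whole buffer area (the AGP buffer map or the full DMA byte
	 * range) into the caller's address space in one operation; the
	 * per-buffer addresses reported below are offsets from this base.
	 */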
	request.virtual = drm_smmap(0, size, PROT_READ | PROT_WRITE,
	    MAP_SHARED, request.fd, foff);
	if (request.virtual == NULL) {
		DRM_ERROR("drm_mapbufs: request.virtual is NULL");
		return (EINVAL);
	}

	vaddr = (unsigned long)request.virtual;
#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		list32 = (drm_buf_pub_32_t *)(uintptr_t)request32.list;
		for (i = 0; i < dma->buf_count; i++) {
			if (DRM_COPY_TO_USER(&list32[i].idx,
			    &dma->buflist[i]->idx, sizeof (list32[0].idx))) {
				return (EFAULT);
			}
			if (DRM_COPY_TO_USER(&list32[i].total,
			    &dma->buflist[i]->total,
			    sizeof (list32[0].total))) {
				return (EFAULT);
			}
			if (DRM_COPY_TO_USER(&list32[i].used,
			    &zero, sizeof (zero))) {
				return (EFAULT);
			}
			address32 = vaddr + dma->buflist[i]->offset;
			ret_tmp = DRM_COPY_TO_USER(&list32[i].address,
			    &address32, sizeof (list32[0].address));
			if (ret_tmp)
				return (EFAULT);
		}
		goto done;
	}
#endif

	ASSERT(ddi_model_convert_from(mode & FMODELS) != DDI_MODEL_ILP32);
	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request.list[i].idx,
		    &dma->buflist[i]->idx, sizeof (request.list[0].idx))) {
			return (EFAULT);
		}
		if (DRM_COPY_TO_USER(&request.list[i].total,
		    &dma->buflist[i]->total, sizeof (request.list[0].total))) {
			return (EFAULT);
		}
		if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
		    sizeof (zero))) {
			return (EFAULT);
		}
		address = vaddr + dma->buflist[i]->offset;
		ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
		    &address, sizeof (address));
		if (ret_tmp) {
			return (EFAULT);
		}
	}

done:
#ifdef	_MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		request32.count = dma->buf_count;
		request32.virtual = (caddr32_t)(uintptr_t)request.virtual;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request32, sizeof (request32));
	} else {
#endif
		request.count = dma->buf_count;
		DRM_COPYTO_WITH_RETURN((void *)data,
		    &request, sizeof (request));
#ifdef	_MULTI_DATAMODEL
	}
#endif
	return (0);
}