3 * Header for the Direct Rendering Manager
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
7 * \par Acknowledgments:
8 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All rights reserved.
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
35 * $DragonFly: src/sys/dev/drm/drm.h,v 1.4 2008/04/05 18:12:29 hasso Exp $
41 * The Direct Rendering Manager (DRM) is a device-independent kernel-level
42 * device driver that provides support for the XFree86 Direct Rendering
43 * Infrastructure (DRI).
45 * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
47 * -# The DRM provides synchronized access to the graphics hardware via
48 * the use of an optimized two-tiered lock.
49 * -# The DRM enforces the DRI security policy for access to the graphics
50 * hardware by only allowing authenticated X11 clients access to
51 * restricted regions of memory.
52 * -# The DRM provides a generic DMA engine, complete with multiple
53 * queues and the ability to detect the need for an OpenGL context switch.
55 * -# The DRM is extensible via the use of small device-specific modules
56 * that rely extensively on the API exported by the DRM module.
71 # define DEPRECATED __attribute__ ((deprecated))
76 #if defined(__linux__)
77 #include <asm/ioctl.h> /* For _IO* macros */
78 #define DRM_IOCTL_NR(n) _IOC_NR(n)
79 #define DRM_IOC_VOID _IOC_NONE
80 #define DRM_IOC_READ _IOC_READ
81 #define DRM_IOC_WRITE _IOC_WRITE
/*
 * Parenthesize the expansion: without the parentheses, an expression such as
 * DRM_IOC_READWRITE << _IOC_DIRSHIFT expands to
 * _IOC_READ | (_IOC_WRITE << _IOC_DIRSHIFT), silently dropping the read bit
 * from the shifted direction field.
 */
#define DRM_IOC_READWRITE (_IOC_READ|_IOC_WRITE)
83 #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
84 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
85 #include <sys/ioccom.h>
86 #define DRM_IOCTL_NR(n) ((n) & 0xff)
87 #define DRM_IOC_VOID IOC_VOID
88 #define DRM_IOC_READ IOC_OUT
89 #define DRM_IOC_WRITE IOC_IN
90 #define DRM_IOC_READWRITE IOC_INOUT
91 #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
97 #if defined(__linux__) || defined(__NetBSD__)
100 #define DRM_MAX_MINOR 15
102 #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
103 #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
104 #define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
105 #define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
107 #define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
108 #define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
109 #define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) /**< Nonzero iff the held bit is set in \a lock */
110 #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) /**< Nonzero iff the contended bit is set in \a lock */
111 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) /**< Context ID: \a lock with the two status bits masked off */
113 #if defined(__linux__)
114 typedef unsigned int drm_handle_t
;
116 #include <sys/types.h>
117 typedef unsigned long drm_handle_t
; /**< To mapped regions */
119 typedef unsigned int drm_context_t
; /**< GLXContext handle */
120 typedef unsigned int drm_drawable_t
;
121 typedef unsigned int drm_magic_t
; /**< Magic for authentication */
126 * \warning If you change this structure, make sure you change
127 * XF86DRIClipRectRec in the server as well
129 * \note KW: Actually it's illegal to change either for
130 * backwards-compatibility reasons.
132 struct drm_clip_rect
{
142 struct drm_tex_region
{
145 unsigned char in_use
;
146 unsigned char padding
;
153 * The lock structure is a simple cache-line aligned integer. To avoid
154 * processor bus contention on a multiprocessor system, there should not be any
155 * other data stored in the same cache line.
158 __volatile__
unsigned int lock
; /**< lock variable */
159 char padding
[60]; /**< Pad to cache line */
162 /* This is beyond ugly, and only works on GCC. However, it allows me to use
163 * drm.h in places (i.e., in the X-server) where I can't use size_t. The real
164 * fix is to use uint32_t instead of size_t, but that fix will break existing
165 * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems. That *will*
166 * eventually happen, though. I chose 'unsigned long' to be the fallback type
167 * because that works on all the platforms I know about. Hopefully, the
168 * real fix will happen before that bites us.
172 # define DRM_SIZE_T __SIZE_TYPE__
174 # warning "__SIZE_TYPE__ not defined. Assuming sizeof(size_t) == sizeof(unsigned long)!"
175 # define DRM_SIZE_T unsigned long
179 * DRM_IOCTL_VERSION ioctl argument type.
181 * \sa drmGetVersion().
184 int version_major
; /**< Major version */
185 int version_minor
; /**< Minor version */
186 int version_patchlevel
; /**< Patch level */
187 DRM_SIZE_T name_len
; /**< Length of name buffer */
188 char __user
*name
; /**< Name of driver */
189 DRM_SIZE_T date_len
; /**< Length of date buffer */
190 char __user
*date
; /**< User-space buffer to hold date */
191 DRM_SIZE_T desc_len
; /**< Length of desc buffer */
192 char __user
*desc
; /**< User-space buffer to hold desc */
196 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
198 * \sa drmGetBusid() and drmSetBusId().
201 DRM_SIZE_T unique_len
; /**< Length of unique */
202 char __user
*unique
; /**< Unique name for driver instantiation */
208 int count
; /**< Length of user-space structures */
209 struct drm_version __user
*version
;
217 * DRM_IOCTL_CONTROL ioctl argument type.
219 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
232 * Type of memory to map.
235 _DRM_FRAME_BUFFER
= 0, /**< WC (no caching), no core dump */
236 _DRM_REGISTERS
= 1, /**< no caching, no core dump */
237 _DRM_SHM
= 2, /**< shared, cached */
238 _DRM_AGP
= 3, /**< AGP/GART */
239 _DRM_SCATTER_GATHER
= 4, /**< Scatter/gather memory for PCI DMA */
240 _DRM_CONSISTENT
= 5, /**< Consistent memory for PCI DMA */
245 * Memory mapping flags.
248 _DRM_RESTRICTED
= 0x01, /**< Cannot be mapped to user-virtual */
249 _DRM_READ_ONLY
= 0x02,
250 _DRM_LOCKED
= 0x04, /**< shared, cached, locked */
251 _DRM_KERNEL
= 0x08, /**< kernel requires access */
252 _DRM_WRITE_COMBINING
= 0x10, /**< use write-combining if available */
253 _DRM_CONTAINS_LOCK
= 0x20, /**< SHM page that contains lock */
254 _DRM_REMOVABLE
= 0x40, /**< Removable mapping */
255 _DRM_DRIVER
= 0x80 /**< Managed by driver */
258 struct drm_ctx_priv_map
{
259 unsigned int ctx_id
; /**< Context requesting private mapping */
260 void *handle
; /**< Handle of map */
264 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
270 unsigned long offset
; /**< Requested physical address (0 for SAREA)*/
271 unsigned long size
; /**< Requested physical size (bytes) */
272 enum drm_map_type type
; /**< Type of memory to map */
273 enum drm_map_flags flags
; /**< Flags */
274 void *handle
; /**< User-space: "Handle" to pass to mmap() */
275 /**< Kernel-space: kernel-virtual address */
276 int mtrr
; /**< MTRR slot used */
281 * DRM_IOCTL_GET_CLIENT ioctl argument type.
284 int idx
; /**< Which client desired? */
285 int auth
; /**< Is client authenticated? */
286 unsigned long pid
; /**< Process ID */
287 unsigned long uid
; /**< User ID */
288 unsigned long magic
; /**< Magic */
289 unsigned long iocs
; /**< Ioctl count */
299 _DRM_STAT_VALUE
, /**< Generic value */
300 _DRM_STAT_BYTE
, /**< Generic byte counter (1024bytes/K) */
301 _DRM_STAT_COUNT
, /**< Generic non-byte counter (1000/k) */
303 _DRM_STAT_IRQ
, /**< IRQ */
304 _DRM_STAT_PRIMARY
, /**< Primary DMA bytes */
305 _DRM_STAT_SECONDARY
, /**< Secondary DMA bytes */
306 _DRM_STAT_DMA
, /**< DMA */
307 _DRM_STAT_SPECIAL
, /**< Special DMA (e.g., priority or polled) */
308 _DRM_STAT_MISSED
/**< Missed DMA opportunity */
309 /* Add to the *END* of the list */
313 * DRM_IOCTL_GET_STATS ioctl argument type.
319 enum drm_stat_type type
;
324 * Hardware locking flags.
326 enum drm_lock_flags
{
327 _DRM_LOCK_READY
= 0x01, /**< Wait until hardware is ready for DMA */
328 _DRM_LOCK_QUIESCENT
= 0x02, /**< Wait until hardware quiescent */
329 _DRM_LOCK_FLUSH
= 0x04, /**< Flush this context's DMA queue first */
330 _DRM_LOCK_FLUSH_ALL
= 0x08, /**< Flush all DMA queues first */
331 /* These *HALT* flags aren't supported yet
332 -- they will be used to support the
333 full-screen DGA-like mode. */
334 _DRM_HALT_ALL_QUEUES
= 0x10, /**< Halt all current and future queues */
335 _DRM_HALT_CUR_QUEUES
= 0x20 /**< Halt all current queues */
339 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
341 * \sa drmGetLock() and drmUnlock().
345 enum drm_lock_flags flags
;
352 * These values \e must match xf86drm.h.
357 /* Flags for DMA buffer dispatch */
358 _DRM_DMA_BLOCK
= 0x01, /**<
359 * Block until buffer dispatched.
361 * \note The buffer may not yet have
362 * been processed by the hardware --
363 * getting a hardware lock with the
364 * hardware quiescent will ensure
365 * that the buffer has been
368 _DRM_DMA_WHILE_LOCKED
= 0x02, /**< Dispatch while lock held */
369 _DRM_DMA_PRIORITY
= 0x04, /**< High priority dispatch */
371 /* Flags for DMA buffer request */
372 _DRM_DMA_WAIT
= 0x10, /**< Wait for free buffers */
373 _DRM_DMA_SMALLER_OK
= 0x20, /**< Smaller-than-requested buffers OK */
374 _DRM_DMA_LARGER_OK
= 0x40 /**< Larger-than-requested buffers OK */
378 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
382 struct drm_buf_desc
{
383 int count
; /**< Number of buffers of this size */
384 int size
; /**< Size in bytes */
385 int low_mark
; /**< Low water mark */
386 int high_mark
; /**< High water mark */
388 _DRM_PAGE_ALIGN
= 0x01, /**< Align on page boundaries for DMA */
389 _DRM_AGP_BUFFER
= 0x02, /**< Buffer is in AGP space */
390 _DRM_SG_BUFFER
= 0x04, /**< Scatter/gather memory buffer */
391 _DRM_FB_BUFFER
= 0x08, /**< Buffer is in frame buffer */
392 _DRM_PCI_BUFFER_RO
= 0x10 /**< Map PCI DMA buffer read-only */
394 unsigned long agp_start
; /**<
395 * Start address of where the AGP buffers are
396 * in the AGP aperture
401 * DRM_IOCTL_INFO_BUFS ioctl argument type.
403 struct drm_buf_info
{
404 int count
; /**< Number of buffers described in list */
405 struct drm_buf_desc __user
*list
; /**< List of buffer descriptions */
409 * DRM_IOCTL_FREE_BUFS ioctl argument type.
411 struct drm_buf_free
{
422 int idx
; /**< Index into the master buffer list */
423 int total
; /**< Buffer size */
424 int used
; /**< Amount of buffer in use (for DMA) */
425 void __user
*address
; /**< Address of buffer */
429 * DRM_IOCTL_MAP_BUFS ioctl argument type.
432 int count
; /**< Length of the buffer list */
433 #if defined(__cplusplus)
434 void __user
*c_virtual
;
436 void __user
*virtual; /**< Mmap'd area in user-virtual */
438 struct drm_buf_pub __user
*list
; /**< Buffer information */
442 * DRM_IOCTL_DMA ioctl argument type.
444 * Indices here refer to the offset into the buffer list in drm_buf_get.
449 int context
; /**< Context handle */
450 int send_count
; /**< Number of buffers to send */
451 int __user
*send_indices
; /**< List of handles to buffers */
452 int __user
*send_sizes
; /**< Lengths of data to send */
453 enum drm_dma_flags flags
; /**< Flags */
454 int request_count
; /**< Number of buffers requested */
455 int request_size
; /**< Desired size for buffers */
456 int __user
*request_indices
; /**< Buffer information */
457 int __user
*request_sizes
;
458 int granted_count
; /**< Number of buffers granted */
462 _DRM_CONTEXT_PRESERVED
= 0x01,
463 _DRM_CONTEXT_2DONLY
= 0x02
467 * DRM_IOCTL_ADD_CTX ioctl argument type.
469 * \sa drmCreateContext() and drmDestroyContext().
472 drm_context_t handle
;
473 enum drm_ctx_flags flags
;
477 * DRM_IOCTL_RES_CTX ioctl argument type.
481 struct drm_ctx __user
*contexts
;
485 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
488 drm_drawable_t handle
;
492 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
495 DRM_DRAWABLE_CLIPRECTS
,
496 } drm_drawable_info_type_t
;
498 struct drm_update_draw
{
499 drm_drawable_t handle
;
502 unsigned long long data
;
506 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
513 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
515 * \sa drmGetInterruptFromBusID().
517 struct drm_irq_busid
{
518 int irq
; /**< IRQ number */
519 int busnum
; /**< bus number */
520 int devnum
; /**< device number */
521 int funcnum
; /**< function number */
524 enum drm_vblank_seq_type
{
525 _DRM_VBLANK_ABSOLUTE
= 0x0, /**< Wait for specific vblank sequence number */
526 _DRM_VBLANK_RELATIVE
= 0x1, /**< Wait for given number of vblanks */
527 _DRM_VBLANK_FLIP
= 0x8000000, /**< Scheduled buffer swap should flip */
528 _DRM_VBLANK_NEXTONMISS
= 0x10000000, /**< If missed, wait for next vblank */
529 _DRM_VBLANK_SECONDARY
= 0x20000000, /**< Secondary display controller */
530 _DRM_VBLANK_SIGNAL
= 0x40000000 /**< Send signal instead of blocking */
/* Bits of drm_vblank_seq_type that select absolute vs. relative waits */
533 #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
/* Modifier bits that may be OR'ed into the sequence type */
534 #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
535 _DRM_VBLANK_NEXTONMISS)
537 struct drm_wait_vblank_request
{
538 enum drm_vblank_seq_type type
;
539 unsigned int sequence
;
540 unsigned long signal
;
543 struct drm_wait_vblank_reply
{
544 enum drm_vblank_seq_type type
;
545 unsigned int sequence
;
551 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
553 * \sa drmWaitVBlank().
555 union drm_wait_vblank
{
556 struct drm_wait_vblank_request request
;
557 struct drm_wait_vblank_reply reply
;
561 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
563 * \sa drmAgpEnable().
565 struct drm_agp_mode
{
566 unsigned long mode
; /**< AGP mode */
570 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
572 * \sa drmAgpAlloc() and drmAgpFree().
574 struct drm_agp_buffer
{
575 unsigned long size
; /**< In bytes -- will round to page boundary */
576 unsigned long handle
; /**< Used for binding / unbinding */
577 unsigned long type
; /**< Type of memory to allocate */
578 unsigned long physical
; /**< Physical used by i810 */
582 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
584 * \sa drmAgpBind() and drmAgpUnbind().
586 struct drm_agp_binding
{
587 unsigned long handle
; /**< From drm_agp_buffer */
588 unsigned long offset
; /**< In bytes -- will round to page boundary */
592 * DRM_IOCTL_AGP_INFO ioctl argument type.
594 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
595 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
596 * drmAgpVendorId() and drmAgpDeviceId().
598 struct drm_agp_info
{
599 int agp_version_major
;
600 int agp_version_minor
;
602 unsigned long aperture_base
; /**< physical address */
603 unsigned long aperture_size
; /**< bytes */
604 unsigned long memory_allowed
; /**< bytes */
605 unsigned long memory_used
;
607 /** \name PCI information */
609 unsigned short id_vendor
;
610 unsigned short id_device
;
615 * DRM_IOCTL_SG_ALLOC ioctl argument type.
617 struct drm_scatter_gather
{
618 unsigned long size
; /**< In bytes -- will round to page boundary */
619 unsigned long handle
; /**< Used for mapping / unmapping */
623 * DRM_IOCTL_SET_VERSION ioctl argument type.
625 struct drm_set_version
{
633 #define DRM_FENCE_FLAG_EMIT 0x00000001
634 #define DRM_FENCE_FLAG_SHAREABLE 0x00000002
635 #define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
636 #define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
637 #define DRM_FENCE_FLAG_NO_USER 0x00000010
639 /* Reserved for driver use */
640 #define DRM_FENCE_MASK_DRIVER 0xFF000000
642 #define DRM_FENCE_TYPE_EXE 0x00000001
644 struct drm_fence_arg
{
646 unsigned int fence_class
;
649 unsigned int signaled
;
651 unsigned int sequence
;
653 uint64_t expand_pad
[2]; /*Future expansion */
656 /* Buffer permissions, referring to how the GPU uses the buffers.
657 * these translate to fence types used for the buffers.
658 * Typically a texture buffer is read, A destination buffer is write and
659 * a command (batch-) buffer is exe. Can be or-ed together.
662 #define DRM_BO_FLAG_READ (1ULL << 0)
663 #define DRM_BO_FLAG_WRITE (1ULL << 1)
664 #define DRM_BO_FLAG_EXE (1ULL << 2)
667 * All of the bits related to access mode
669 #define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
671 * Status flags. Can be read to determine the actual state of a buffer.
672 * Can also be set in the buffer mask before validation.
676 * Mask: Never evict this buffer. Not even with force. This type of buffer is only
677 * available to root and must be manually removed before buffer manager shutdown
681 #define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
684 * Mask: Require that the buffer is placed in mappable memory when validated.
685 * If not set the buffer may or may not be in mappable memory when validated.
686 * Flags: If set, the buffer is in mappable memory.
688 #define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
690 /* Mask: The buffer should be shareable with other processes.
691 * Flags: The buffer is shareable with other processes.
693 #define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
695 /* Mask: If set, place the buffer in cache-coherent memory if available.
696 * If clear, never place the buffer in cache coherent memory if validated.
697 * Flags: The buffer is currently in cache-coherent memory.
699 #define DRM_BO_FLAG_CACHED (1ULL << 7)
701 /* Mask: Make sure that every time this buffer is validated,
702 * it ends up on the same location provided that the memory mask is the same.
703 * The buffer will also not be evicted when claiming space for
704 * other buffers. Basically a pinned buffer but it may be thrown out as
705 * part of buffer manager shutdown or locking.
706 * Flags: Acknowledge.
708 #define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
710 /* Mask: Make sure the buffer is in cached memory when mapped
711 * Flags: Acknowledge.
712 * Buffers allocated with this flag should not be used for suballocators
713 * This type may have issues on CPUs with over-aggressive caching
714 * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
716 #define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
719 /* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
720 * Flags: Acknowledge.
722 #define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
725 * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
726 * Flags: Acknowledge.
728 #define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
729 #define DRM_BO_FLAG_TILE (1ULL << 15)
732 * Memory type flags that can be or'ed together in the mask, but only
733 * one appears in flags.
737 #define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
738 /* Translation table memory */
739 #define DRM_BO_FLAG_MEM_TT (1ULL << 25)
741 #define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
742 /* Up to the driver to define. */
743 #define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
744 #define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
745 #define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
746 #define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
747 #define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
748 /* We can add more of these now with a 64-bit flag type */
751 * This is a mask covering all of the memory type flags; easier to just
752 * use a single constant than a bunch of | values. It covers
753 * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
755 #define DRM_BO_MASK_MEM 0x00000000FF000000ULL
757 * This adds all of the CPU-mapping options in with the memory
758 * type to label all bits which change how the page gets mapped
760 #define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
761 DRM_BO_FLAG_CACHED_MAPPED | \
762 DRM_BO_FLAG_CACHED | \
763 DRM_BO_FLAG_MAPPABLE)
765 /* Driver-private flags */
766 #define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
769 * Don't block on validate and map. Instead, return EBUSY.
771 #define DRM_BO_HINT_DONT_BLOCK 0x00000002
773 * Don't place this buffer on the unfenced list. This means
774 * that the buffer will not end up having a fence associated
775 * with it as a result of this operation
777 #define DRM_BO_HINT_DONT_FENCE 0x00000004
779 * Sleep while waiting for the operation to complete.
780 * Without this flag, the kernel will, instead, spin
781 * until this operation has completed. I'm not sure
782 * why you would ever want this, so please always
783 * provide DRM_BO_HINT_WAIT_LAZY to any operation
786 #define DRM_BO_HINT_WAIT_LAZY 0x00000008
788 * The client has computed relocations referring to this buffer using the
789 * offset in the presumed_offset field. If that offset ends up matching
790 * where this buffer lands, the kernel is free to skip executing those
793 #define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
795 #define DRM_BO_INIT_MAGIC 0xfe769812
796 #define DRM_BO_INIT_MAJOR 1
797 #define DRM_BO_INIT_MINOR 0
798 #define DRM_BO_INIT_PATCH 0
801 struct drm_bo_info_req
{
806 unsigned int fence_class
;
807 unsigned int desired_tile_stride
;
808 unsigned int tile_info
;
810 uint64_t presumed_offset
;
813 struct drm_bo_create_req
{
816 uint64_t buffer_start
;
818 unsigned int page_alignment
;
826 #define DRM_BO_REP_BUSY 0x00000001
828 struct drm_bo_info_rep
{
830 uint64_t proposed_flags
;
834 uint64_t buffer_start
;
836 unsigned int fence_flags
;
837 unsigned int rep_flags
;
838 unsigned int page_alignment
;
839 unsigned int desired_tile_stride
;
840 unsigned int hw_tile_stride
;
841 unsigned int tile_info
;
843 uint64_t expand_pad
[4]; /*Future expansion */
846 struct drm_bo_arg_rep
{
847 struct drm_bo_info_rep bo_info
;
852 struct drm_bo_create_arg
{
854 struct drm_bo_create_req req
;
855 struct drm_bo_info_rep rep
;
859 struct drm_bo_handle_arg
{
863 struct drm_bo_reference_info_arg
{
865 struct drm_bo_handle_arg req
;
866 struct drm_bo_info_rep rep
;
870 struct drm_bo_map_wait_idle_arg
{
872 struct drm_bo_info_req req
;
873 struct drm_bo_info_rep rep
;
877 struct drm_bo_op_req
{
883 unsigned int arg_handle
;
884 struct drm_bo_info_req bo_req
;
888 struct drm_bo_op_arg
{
891 struct drm_bo_op_req req
;
892 struct drm_bo_arg_rep rep
;
899 #define DRM_BO_MEM_LOCAL 0
900 #define DRM_BO_MEM_TT 1
901 #define DRM_BO_MEM_VRAM 2
902 #define DRM_BO_MEM_PRIV0 3
903 #define DRM_BO_MEM_PRIV1 4
904 #define DRM_BO_MEM_PRIV2 5
905 #define DRM_BO_MEM_PRIV3 6
906 #define DRM_BO_MEM_PRIV4 7
908 #define DRM_BO_MEM_TYPES 8 /* For now. */
910 #define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
911 #define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
913 struct drm_bo_version_arg
{
919 struct drm_mm_type_arg
{
920 unsigned int mem_type
;
921 unsigned int lock_flags
;
924 struct drm_mm_init_arg
{
928 unsigned int mem_type
;
934 * \name Ioctls Definitions
938 #define DRM_IOCTL_BASE 'd' /**< ioctl "magic" number shared by all DRM ioctls */
939 #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) /**< DRM ioctl carrying no argument */
940 #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) /**< DRM ioctl where userspace reads a \a type from the kernel */
941 #define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) /**< DRM ioctl where userspace writes a \a type to the kernel */
942 #define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) /**< DRM ioctl where a \a type is both written and read back */
944 #define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
945 #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
946 #define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
947 #define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
948 #define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
949 #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
950 #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
951 #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
953 #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
954 #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
955 #define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
956 #define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
957 #define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
958 #define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
959 #define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
960 #define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
961 #define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
962 #define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
963 #define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
965 #define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
967 #define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
968 #define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
970 #define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
971 #define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
972 #define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
973 #define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
974 #define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
975 #define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
976 #define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
977 #define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
978 #define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
979 #define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
980 #define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
981 #define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
982 #define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
984 #define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
985 #define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
986 #define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
987 #define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
988 #define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
989 #define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
990 #define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
991 #define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
993 #define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, struct drm_scatter_gather)
994 #define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
996 #define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
998 #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
1000 #define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
1001 #define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
1002 #define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
1003 #define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
1005 #define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
1006 #define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
1007 #define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
1008 #define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
1009 #define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
1010 #define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
1011 #define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
1012 #define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
1014 #define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
1015 #define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
1016 #define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
1017 #define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
1018 #define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
1019 #define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
1020 #define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
1021 #define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
1022 #define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
1028 * Device specific ioctls should only be in their respective headers
1029 * The device specific ioctl range is from 0x40 to 0x99.
1030 * Generic IOCTLS restart at 0xA0.
1032 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
1033 * drmCommandReadWrite().
1035 #define DRM_COMMAND_BASE 0x40
1036 #define DRM_COMMAND_END 0xA0
1039 #if !defined(__KERNEL__) || defined(__FreeBSD__) || defined(__OpenBSD__) || \
1040 defined(__NetBSD__) || defined(__DragonFly__)
1041 typedef struct drm_clip_rect drm_clip_rect_t
;
1042 typedef struct drm_tex_region drm_tex_region_t
;
1043 typedef struct drm_hw_lock drm_hw_lock_t
;
1044 typedef struct drm_version drm_version_t
;
1045 typedef struct drm_unique drm_unique_t
;
1046 typedef struct drm_list drm_list_t
;
1047 typedef struct drm_block drm_block_t
;
1048 typedef struct drm_control drm_control_t
;
1049 typedef enum drm_map_type drm_map_type_t
;
1050 typedef enum drm_map_flags drm_map_flags_t
;
1051 typedef struct drm_ctx_priv_map drm_ctx_priv_map_t
;
1052 typedef struct drm_map drm_map_t
;
1053 typedef struct drm_client drm_client_t
;
1054 typedef enum drm_stat_type drm_stat_type_t
;
1055 typedef struct drm_stats drm_stats_t
;
1056 typedef enum drm_lock_flags drm_lock_flags_t
;
1057 typedef struct drm_lock drm_lock_t
;
1058 typedef enum drm_dma_flags drm_dma_flags_t
;
1059 typedef struct drm_buf_desc drm_buf_desc_t
;
1060 typedef struct drm_buf_info drm_buf_info_t
;
1061 typedef struct drm_buf_free drm_buf_free_t
;
1062 typedef struct drm_buf_pub drm_buf_pub_t
;
1063 typedef struct drm_buf_map drm_buf_map_t
;
1064 typedef struct drm_dma drm_dma_t
;
1065 typedef union drm_wait_vblank drm_wait_vblank_t
;
1066 typedef struct drm_agp_mode drm_agp_mode_t
;
1067 typedef enum drm_ctx_flags drm_ctx_flags_t
;
1068 typedef struct drm_ctx drm_ctx_t
;
1069 typedef struct drm_ctx_res drm_ctx_res_t
;
1070 typedef struct drm_draw drm_draw_t
;
1071 typedef struct drm_update_draw drm_update_draw_t
;
1072 typedef struct drm_auth drm_auth_t
;
1073 typedef struct drm_irq_busid drm_irq_busid_t
;
1074 typedef enum drm_vblank_seq_type drm_vblank_seq_type_t
;
1075 typedef struct drm_agp_buffer drm_agp_buffer_t
;
1076 typedef struct drm_agp_binding drm_agp_binding_t
;
1077 typedef struct drm_agp_info drm_agp_info_t
;
1078 typedef struct drm_scatter_gather drm_scatter_gather_t
;
1079 typedef struct drm_set_version drm_set_version_t
;
1081 typedef struct drm_fence_arg drm_fence_arg_t
;
1082 typedef struct drm_mm_type_arg drm_mm_type_arg_t
;
1083 typedef struct drm_mm_init_arg drm_mm_init_arg_t
;
1084 typedef enum drm_bo_type drm_bo_type_t
;