2 * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
3 * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
6 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
7 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Copyright (c) 2009, Intel Corporation.
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
31 * Rickard E. (Rik) Faith <faith@valinux.com>
32 * Gareth Hughes <gareth@valinux.com>
37 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
38 * Use is subject to license terms.
44 #include <sys/sysmacros.h>
45 #include <sys/types.h>
47 #include <sys/modctl.h>
50 #include <sys/cmn_err.h>
51 #include <sys/varargs.h>
54 #include <sys/sunddi.h>
55 #include <sys/sunldi.h>
57 #include <sys/agpgart.h>
59 #include "drm_atomic.h"
62 #include "drm_linux_list.h"
65 #define __inline__ inline
68 #if !defined(__FUNCTION__)
70 #define __FUNCTION__ __func__
72 #define __FUNCTION__ " "
/*
 * Page geometry.  DRM_PAGE_SHIFT follows the platform page shift
 * (PAGESHIFT), so the shift-based conversion helpers below operate on
 * native MMU-sized pages.
 */
#define	DRM_PAGE_SHIFT		PAGESHIFT
#define	DRM_PAGE_SIZE		(1 << DRM_PAGE_SHIFT)
#define	DRM_PAGE_OFFSET		(DRM_PAGE_SIZE - 1)
/* Fully parenthesized so the expansion composes safely in any expression. */
#define	DRM_PAGE_MASK		(~(DRM_PAGE_SIZE - 1))
/*
 * NOTE(review): MB2PAGES (<< 8) and PAGES2KB (<< 2) hard-code 4KB pages,
 * unlike the DRM_PAGE_SHIFT-based helpers -- confirm PAGESHIFT is 12 on
 * every supported platform before relying on these two.
 */
#define	DRM_MB2PAGES(x)		((x) << 8)
#define	DRM_PAGES2BYTES(x)	((x) << DRM_PAGE_SHIFT)
#define	DRM_BYTES2PAGES(x)	((x) >> DRM_PAGE_SHIFT)
#define	DRM_PAGES2KB(x)		((x) << 2)
/* True when offset sits exactly on a page boundary. */
#define	DRM_ALIGNED(offset)	(((offset) & DRM_PAGE_OFFSET) == 0)

/* Linux-compatible aliases used throughout the DRM code. */
#define	PAGE_SHIFT		DRM_PAGE_SHIFT
#define	PAGE_SIZE		DRM_PAGE_SIZE
/* Driver instance / context bookkeeping constants. */
#define	DRM_MAX_INSTANCES	8	/* max DRM device instances */
#define	DRM_DEVNODE		"drm"	/* base name of the device node */
#define	DRM_UNOPENED		0	/* presumably "not yet opened" state -- no user visible here */

#define	DRM_HASH_SIZE		16	/* Size of key hash table */
#define	DRM_KERNEL_CONTEXT	0	/* Change drm_resctx if changed */
#define	DRM_RESERVED_CONTEXTS	1	/* Change drm_resctx if changed */
/*
 * Memory-area tags.  Presumably consumed as the trailing `int` argument of
 * the drm_alloc()/drm_calloc()/drm_free() prototypes declared below to
 * classify allocations -- confirm against drm_memory.c.
 */
#define	DRM_MEM_SAREA		1
#define	DRM_MEM_DRIVER		2
#define	DRM_MEM_MAGIC		3
#define	DRM_MEM_IOCTLS		4
#define	DRM_MEM_MAPS		5
#define	DRM_MEM_BUFS		6
#define	DRM_MEM_SEGS		7
#define	DRM_MEM_PAGES		8
#define	DRM_MEM_FILES		9
#define	DRM_MEM_QUEUES		10
#define	DRM_MEM_CMDS		11
#define	DRM_MEM_MAPPINGS	12
#define	DRM_MEM_BUFLISTS	13
#define	DRM_MEM_DRMLISTS	14
#define	DRM_MEM_TOTALDRM	15
#define	DRM_MEM_BOUNDDRM	16
#define	DRM_MEM_CTXBITMAP	17
#define	DRM_MEM_STUB		18
#define	DRM_MEM_SGLISTS		19
#define	DRM_MEM_AGPLISTS	20
#define	DRM_MEM_CTXLIST		21
#define	DRM_MEM_MM		22
#define	DRM_MEM_HASHTAB		23
#define	DRM_MEM_OBJECTS		24
#define	DRM_MAX_CTXBITMAP	(PAGE_SIZE * 8)	/* one context bit per bit of a page */
#define	DRM_MAP_HASH_OFFSET	0x10000000
#define	DRM_MAP_HASH_ORDER	12	/* log2 of map hash-table size */
#define	DRM_OBJECT_HASH_ORDER	12	/* log2 of object hash-table size */
/* mmap offset range reserved for per-file (GEM) mappings, above 4GB >> shift. */
#define	DRM_FILE_PAGE_OFFSET_START	((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define	DRM_FILE_PAGE_OFFSET_SIZE	((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#define	DRM_MM_INIT_MAX_PAGES	256
134 /* Internal types and structures */
/* Internal types and structures */
/* Element count of a true array -- NOT valid on a pointer/parameter. */
#define	DRM_ARRAY_SIZE(x)	(sizeof (x) / sizeof ((x)[0]))
/* NOTE: each argument is evaluated twice; avoid side effects in a/b. */
#define	DRM_MIN(a, b)	((a) < (b) ? (a) : (b))
#define	DRM_MAX(a, b)	((a) > (b) ? (a) : (b))

/*
 * Pack a (major, minor) interface version into one word.
 * Fix: arguments are now parenthesized so expression arguments
 * (e.g. a value containing `|` or `?:`) expand correctly.
 */
#define	DRM_IF_VERSION(maj, min)	(((maj) << 16) | (min))
#define	__OS_HAS_AGP	1	/* this build includes AGP support */

/* Default device-node attributes: rw for owner and group, both root. */
#define	DRM_DEV_MOD	(S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define	DRM_DEV_UID	0
#define	DRM_DEV_GID	0
/* Current process id, via the DDI. */
#define	DRM_CURRENTPID		ddi_get_pid()

/*
 * Linux/BSD-style locking shims mapped onto Solaris kernel mutexes.
 * The interrupt-state "flag" argument of the irqsave variants is ignored,
 * and DRM_SPINLOCK_ASSERT expands to nothing.
 * DRM_LOCK/DRM_UNLOCK/DRM_LOCK_OWNED expand references to a `dev`
 * pointer, so they may only be used where `dev` is in scope.
 */
#define	DRM_SPINLOCK(l)		mutex_enter(l)
#define	DRM_SPINUNLOCK(u)	mutex_exit(u)
#define	DRM_SPINLOCK_ASSERT(l)
#define	DRM_LOCK()		mutex_enter(&dev->dev_lock)
#define	DRM_UNLOCK()		mutex_exit(&dev->dev_lock)
#define	DRM_LOCK_OWNED()	ASSERT(mutex_owned(&dev->dev_lock))
#define	spin_lock_irqsave(l, flag)	mutex_enter(l)
#define	spin_unlock_irqrestore(u, flag)	mutex_exit(u)
#define	spin_lock(l)		mutex_enter(l)
#define	spin_unlock(u)		mutex_exit(u)
/*
 * Sleep for the requested interval.  The expansion multiplies the argument
 * by 1000 before handing it to drv_usectohz() (which takes microseconds),
 * so despite the parameter's name the caller-supplied value is treated as
 * MILLISECONDS.  Fix: the argument is now parenthesized so an expression
 * argument (e.g. DRM_UDELAY(a + b)) expands correctly.
 */
#define	DRM_UDELAY(sec)		delay(drv_usectohz((sec) * 1000))
/* Expands to nothing on this platform. */
#define	DRM_MEMORYBARRIER()
/* Forward typedefs for the core DRM objects defined later in this header. */
typedef struct drm_file		drm_file_t;	/* per-open-file state (struct drm_file below) */
typedef struct drm_device	drm_device_t;	/* per-device soft state */
typedef struct drm_driver_info	drm_driver_t;	/* driver entry-point table */
/*
 * Canonical ioctl-handler parameter list, and the helper every handler
 * uses to bind the incoming dev1 to a local `dev` variable.
 */
#define	DRM_DEVICE	drm_device_t *dev = dev1
#define	DRM_IOCTL_ARGS	\
	drm_device_t *dev1, intptr_t data, drm_file_t *fpriv, int mode
171 #define DRM_COPYFROM_WITH_RETURN(dest, src, size) \
172 if (ddi_copyin((src), (dest), (size), 0)) { \
173 DRM_ERROR("%s: copy from user failed", __func__); \
177 #define DRM_COPYTO_WITH_RETURN(dest, src, size) \
178 if (ddi_copyout((src), (dest), (size), 0)) { \
179 DRM_ERROR("%s: copy to user failed", __func__); \
/*
 * Thin wrappers around ddi_copyin(9F)/ddi_copyout(9F).  They return 0 on
 * success and nonzero if the user-space access faulted; unlike the
 * *_WITH_RETURN variants, callers must check the result themselves.
 */
#define	DRM_COPY_FROM_USER(dest, src, size) \
	ddi_copyin((src), (dest), (size), 0) /* flag for src */

#define	DRM_COPY_TO_USER(dest, src, size) \
	ddi_copyout((src), (dest), (size), 0) /* flags for dest */

#define	DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
	ddi_copyin((arg2), (arg1), (arg3), 0)

/* Fix: arg1/arg3 are now parenthesized, matching every sibling macro. */
#define	DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
	ddi_copyout((arg2), (arg1), (arg3), 0)
/*
 * MMIO register accessors: read/write a fixed-width value at a byte
 * offset from map->dev_addr.  The access is through a volatile pointer
 * so the compiler cannot cache or elide device accesses.  Each expansion
 * is wrapped in outer parentheses so it composes safely inside larger
 * expressions (e.g. casts or arithmetic on a DRM_READ* result).
 */
#define	DRM_READ8(map, offset) \
	(*(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset)))
#define	DRM_READ16(map, offset) \
	(*(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset)))
#define	DRM_READ32(map, offset) \
	(*(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset)))
#define	DRM_WRITE8(map, offset, val) \
	(*(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val))
#define	DRM_WRITE16(map, offset, val) \
	(*(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val))
#define	DRM_WRITE32(map, offset, val) \
	(*(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val))
208 typedef struct drm_wait_queue
{
213 #define DRM_INIT_WAITQUEUE(q, pri) \
215 mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, pri); \
216 cv_init(&(q)->cv, NULL, CV_DRIVER, NULL); \
219 #define DRM_FINI_WAITQUEUE(q) \
221 mutex_destroy(&(q)->lock); \
222 cv_destroy(&(q)->cv); \
225 #define DRM_WAKEUP(q) \
227 mutex_enter(&(q)->lock); \
228 cv_broadcast(&(q)->cv); \
229 mutex_exit(&(q)->lock); \
/* Linux compatibility: the current lbolt tick count stands in for jiffies. */
#define	jiffies	ddi_get_lbolt()
234 #define DRM_WAIT_ON(ret, q, timeout, condition) \
235 mutex_enter(&(q)->lock); \
236 while (!(condition)) { \
237 ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout,\
242 } else if (ret == 0) { \
249 mutex_exit(&(q)->lock);
251 #define DRM_WAIT(ret, q, condition) \
252 mutex_enter(&(q)->lock); \
253 if (!(condition)) { \
254 ret = cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + 30 * DRM_HZ); \
256 /* gfx maybe hang */ \
263 mutex_exit(&(q)->lock);
266 #define DRM_GETSAREA() \
268 drm_local_map_t *map; \
269 DRM_SPINLOCK_ASSERT(&dev->dev_lock); \
270 TAILQ_FOREACH(map, &dev->maplist, link) { \
271 if (map->type == _DRM_SHM && \
272 map->flags & _DRM_CONTAINS_LOCK) { \
273 dev_priv->sarea = map; \
279 #define LOCK_TEST_WITH_RETURN(dev, fpriv) \
280 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
281 dev->lock.filp != fpriv) { \
282 DRM_DEBUG("%s called without lock held", __func__); \
/* Interrupt-handler parameter list and Linux-style return codes. */
#define	DRM_IRQ_ARGS	caddr_t arg
#define	IRQ_HANDLED	DDI_INTR_CLAIMED
#define	IRQ_NONE	DDI_INTR_UNCLAIMED

/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
#define	PCIY_AGP	0x02	/* AGP capability ID */
#define	PCIY_EXPRESS	0x10	/* PCI Express capability ID */
/* Round addr up to the next page boundary. */
#define	PAGE_ALIGN(addr)	(((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
/*
 * Superuser check on a credential.
 * NOTE(review): this tests the SAVED ids (crgetsgid/crgetsuid) -- verify
 * that is intentional rather than the effective id / a priv_policy check.
 */
#define	DRM_SUSER(p)	(crgetsgid(p) == 0 || crgetsuid(p) == 0)

#define	DRM_GEM_OBJIDR_HASHNODE	1024
/* Visit every entry in every hash bucket of a GEM idr list (C99 loop var). */
#define	idr_list_for_each(entry, head) \
	for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
		list_for_each(entry, &(head)->next[key])

/*
 * wait for 400 milliseconds
 */
#define	DRM_HZ	drv_usectohz(400000)
/* Linux-style scalar shorthands used by ported driver code. */
typedef unsigned long	dma_addr_t;	/* pointer-sized; NOTE(review): 32-bit under ILP32 -- confirm acceptable */
typedef uint64_t	u64;
typedef uint32_t	u32;
typedef uint16_t	u16;

typedef uint_t		irqreturn_t;	/* carries IRQ_HANDLED / IRQ_NONE */

#define	DRM_SUPPORT	1
#define	DRM_UNSUPPORT	0

/*
 * NOTE(review): duplicate of the identical definition earlier in this
 * header -- legal C (same token sequence) but one copy could be dropped.
 */
#define	__OS_HAS_AGP	1
330 typedef struct drm_pci_id_list
339 #define DRM_MASTER 0x2
340 #define DRM_ROOT_ONLY 0x4
341 typedef int drm_ioctl_t(DRM_IOCTL_ARGS
);
342 typedef struct drm_ioctl_desc
{
343 int (*func
)(DRM_IOCTL_ARGS
);
347 typedef struct drm_magic_entry
{
349 struct drm_file
*priv
;
350 struct drm_magic_entry
*next
;
353 typedef struct drm_magic_head
{
354 struct drm_magic_entry
*head
;
355 struct drm_magic_entry
*tail
;
358 typedef struct drm_buf
{
359 int idx
; /* Index into master buflist */
360 int total
; /* Buffer size */
361 int order
; /* log-base-2(total) */
362 int used
; /* Amount of buffer in use (for DMA) */
363 unsigned long offset
; /* Byte offset (used internally) */
364 void *address
; /* Address of buffer */
365 unsigned long bus_address
; /* Bus address of buffer */
366 struct drm_buf
*next
; /* Kernel-only: used for free list */
367 volatile int pending
; /* On hardware DMA queue */
369 /* Uniq. identifier of holding process */
370 int context
; /* Kernel queue for this buffer */
378 } list
; /* Which list we're on */
380 int dev_priv_size
; /* Size of buffer private stoarge */
381 void *dev_private
; /* Per-buffer private storage */
384 typedef struct drm_freelist
{
385 int initialized
; /* Freelist in use */
386 uint32_t count
; /* Number of free buffers */
387 drm_buf_t
*next
; /* End pointer */
389 int low_mark
; /* Low water mark */
390 int high_mark
; /* High water mark */
393 typedef struct drm_buf_entry
{
401 unsigned long *seglist_bus
;
403 drm_freelist_t freelist
;
406 typedef TAILQ_HEAD(drm_file_list
, drm_file
) drm_file_list_t
;
409 typedef struct drm_local_map
{
410 unsigned long offset
; /* Physical address (0 for SAREA) */
411 unsigned long size
; /* Physical size (bytes) */
412 drm_map_type_t type
; /* Type of memory mapped */
413 drm_map_flags_t flags
; /* Flags */
414 void *handle
; /* User-space: "Handle" to pass to mmap */
415 /* Kernel-space: kernel-virtual address */
416 int mtrr
; /* Boolean: MTRR used */
418 int rid
; /* PCI resource ID for bus_space */
419 int kernel_owned
; /* Boolean: 1= initmapped, 0= addmapped */
420 caddr_t dev_addr
; /* base device address */
421 ddi_acc_handle_t dev_handle
; /* The data access handle */
422 ddi_umem_cookie_t drm_umem_cookie
; /* For SAREA alloc and free */
423 TAILQ_ENTRY(drm_local_map
) link
;
428 * This structure defines the drm_mm memory object, which will be used by the
429 * DRM for its buffer objects.
431 struct drm_gem_object
{
432 /* Reference count of this object */
435 /* Handle count of this object. Each handle also holds a reference */
436 atomic_t handlecount
;
438 /* Related drm device */
439 struct drm_device
*dev
;
443 * Size of the object, in bytes. Immutable over the object's
449 * Global name for this object, starts at 1. 0 means unnamed.
450 * Access is covered by the object_name_lock in the related drm_device
455 * Memory domains. These monitor which caches contain read/write data
456 * related to the object. When transitioning from one set of domains
457 * to another, the driver is called to ensure that caches are suitably
458 * flushed and invalidated
460 uint32_t read_domains
;
461 uint32_t write_domain
;
464 * While validating an exec operation, the
465 * new read/write domain values are computed here.
466 * They will be transferred to the above values
467 * at the point that any cache flushing occurs
469 uint32_t pending_read_domains
;
470 uint32_t pending_write_domain
;
472 void *driver_private
;
474 drm_local_map_t
*map
;
475 ddi_dma_handle_t dma_hdl
;
476 ddi_acc_handle_t acc_hdl
;
478 size_t real_size
; /* real size of memory */
483 struct idr_list
*next
, *prev
;
484 struct drm_gem_object
*obj
;
490 TAILQ_ENTRY(drm_file
) link
;
498 unsigned long ioctl_count
;
500 /* Mapping of mm object handles to object pointers. */
501 struct idr_list object_idr
;
502 /* Lock for synchronization of access to object_idr. */
509 typedef struct drm_lock_data
{
510 drm_hw_lock_t
*hw_lock
; /* Hardware lock */
512 /* Uniq. identifier of holding process */
513 kcondvar_t lock_cv
; /* lock queue - SOLARIS Specific */
514 kmutex_t lock_mutex
; /* lock - SOLARIS Specific */
515 unsigned long lock_time
; /* Time of last lock in clock ticks */
519 * This structure, in drm_device_t, is always initialized while the device
520 * is open. dev->dma_lock protects the incrementing of dev->buf_use, which
521 * when set marks that no further bufs may be allocated until device teardown
522 * occurs (when the last open of the device has closed). The high/low
523 * watermarks of bufs are only touched by the X Server, and thus not
524 * concurrently accessed, so no locking is needed.
526 typedef struct drm_device_dma
{
527 drm_buf_entry_t bufs
[DRM_MAX_ORDER
+1];
529 drm_buf_t
**buflist
; /* Vector of pointers info bufs */
532 unsigned long *pagelist
;
533 unsigned long byte_count
;
535 _DRM_DMA_USE_AGP
= 0x01,
536 _DRM_DMA_USE_SG
= 0x02
540 typedef struct drm_agp_mem
{
542 unsigned long bound
; /* address */
545 struct drm_agp_mem
*prev
;
546 struct drm_agp_mem
*next
;
549 typedef struct drm_agp_head
{
552 drm_agp_mem_t
*memory
;
558 int cant_use_aperture
;
559 unsigned long page_mask
;
560 ldi_ident_t agpgart_li
;
561 ldi_handle_t agpgart_lh
;
565 typedef struct drm_dma_handle
{
566 ddi_dma_handle_t dma_hdl
;
567 ddi_acc_handle_t acc_hdl
;
568 ddi_dma_cookie_t cookie
;
570 uintptr_t vaddr
; /* virtual addr */
571 uintptr_t paddr
; /* physical addr */
572 size_t real_sz
; /* real size of memory */
575 typedef struct drm_sg_mem
{
576 unsigned long handle
;
580 ddi_umem_cookie_t
*umem_cookie
;
581 drm_dma_handle_t
*dmah_sg
;
582 drm_dma_handle_t
*dmah_gart
; /* Handle to PCI memory */
586 * Generic memory manager structs
590 struct list_head fl_entry
;
591 struct list_head ml_entry
;
600 struct list_head fl_entry
;
601 struct list_head ml_entry
;
604 typedef TAILQ_HEAD(drm_map_list
, drm_local_map
) drm_map_list_t
;
606 typedef TAILQ_HEAD(drm_vbl_sig_list
, drm_vbl_sig
) drm_vbl_sig_list_t
;
607 typedef struct drm_vbl_sig
{
608 TAILQ_ENTRY(drm_vbl_sig
) link
;
609 unsigned int sequence
;
615 /* used for clone device */
616 typedef TAILQ_HEAD(drm_cminor_list
, drm_cminor
) drm_cminor_list_t
;
617 typedef struct drm_cminor
{
618 TAILQ_ENTRY(drm_cminor
) link
;
623 /* location of GART table */
624 #define DRM_ATI_GART_MAIN 1
625 #define DRM_ATI_GART_FB 2
627 typedef struct ati_pcigart_info
{
628 int gart_table_location
;
632 drm_local_map_t mapping
;
633 } drm_ati_pcigart_info
;
635 /* DRM device structure */
637 struct drm_driver_info
{
638 int (*load
)(struct drm_device
*, unsigned long);
639 int (*firstopen
)(struct drm_device
*);
640 int (*open
)(struct drm_device
*, drm_file_t
*);
641 void (*preclose
)(struct drm_device
*, drm_file_t
*);
642 void (*postclose
)(struct drm_device
*, drm_file_t
*);
643 void (*lastclose
)(struct drm_device
*);
644 int (*unload
)(struct drm_device
*);
645 void (*reclaim_buffers_locked
)(struct drm_device
*, drm_file_t
*);
646 int (*presetup
)(struct drm_device
*);
647 int (*postsetup
)(struct drm_device
*);
648 int (*open_helper
)(struct drm_device
*, drm_file_t
*);
649 void (*free_filp_priv
)(struct drm_device
*, drm_file_t
*);
650 void (*release
)(struct drm_device
*, void *);
651 int (*dma_ioctl
)(DRM_IOCTL_ARGS
);
652 void (*dma_ready
)(struct drm_device
*);
653 int (*dma_quiescent
)(struct drm_device
*);
654 int (*dma_flush_block_and_flush
)(struct drm_device
*,
655 int, drm_lock_flags_t
);
656 int (*dma_flush_unblock
)(struct drm_device
*, int,
658 int (*context_ctor
)(struct drm_device
*, int);
659 int (*context_dtor
)(struct drm_device
*, int);
660 int (*kernel_context_switch
)(struct drm_device
*, int, int);
661 int (*kernel_context_switch_unlock
)(struct drm_device
*);
662 int (*device_is_agp
) (struct drm_device
*);
663 int (*irq_preinstall
)(struct drm_device
*);
664 void (*irq_postinstall
)(struct drm_device
*);
665 void (*irq_uninstall
)(struct drm_device
*dev
);
666 uint_t (*irq_handler
)(DRM_IRQ_ARGS
);
667 int (*vblank_wait
)(struct drm_device
*, unsigned int *);
668 int (*vblank_wait2
)(struct drm_device
*, unsigned int *);
669 /* added for intel minimized vblank */
670 u32 (*get_vblank_counter
)(struct drm_device
*dev
, int crtc
);
671 int (*enable_vblank
)(struct drm_device
*dev
, int crtc
);
672 void (*disable_vblank
)(struct drm_device
*dev
, int crtc
);
675 * Driver-specific constructor for drm_gem_objects, to set up
676 * obj->driver_private.
678 * Returns 0 on success.
680 int (*gem_init_object
) (struct drm_gem_object
*obj
);
681 void (*gem_free_object
) (struct drm_gem_object
*obj
);
684 drm_ioctl_desc_t
*driver_ioctls
;
685 int max_driver_ioctl
;
690 int driver_patchlevel
;
691 const char *driver_name
; /* Simple driver name */
692 const char *driver_desc
; /* Longer driver name */
693 const char *driver_date
; /* Date of last major changes. */
696 unsigned require_agp
:1;
699 unsigned use_pci_dma
:1;
700 unsigned use_dma_queue
:1;
702 unsigned use_vbl_irq
:1;
703 unsigned use_vbl_irq2
:1;
704 unsigned use_mtrr
:1;
709 * hardware-specific code needs to initialize mutexes which
710 * can be used in interrupt context, so they need to know
711 * the interrupt priority. Interrupt cookie in drm_device
712 * structure is the intr_block field.
714 #define DRM_INTR_PRI(dev) \
715 DDI_INTR_PRI((dev)->intr_block)
718 drm_driver_t
*driver
;
719 drm_cminor_list_t minordevs
;
723 const char *desc
; /* current driver description */
727 ddi_iblock_cookie_t intr_block
;
728 uint32_t pci_device
; /* PCI device id */
730 char *unique
; /* Unique identifier: e.g., busid */
731 int unique_len
; /* Length of unique field */
732 int if_version
; /* Highest interface version set */
733 int flags
; /* Flags to open(2) */
736 kmutex_t vbl_lock
; /* protects vblank operations */
737 kmutex_t dma_lock
; /* protects dev->dma */
738 kmutex_t irq_lock
; /* protects irq condition checks */
739 kmutex_t dev_lock
; /* protects everything else */
740 drm_lock_data_t lock
; /* Information on hardware lock */
741 kmutex_t struct_mutex
; /* < For others */
744 int open_count
; /* Outstanding files open */
745 int buf_use
; /* Buffers in use -- cannot alloc */
747 /* Performance counters */
748 unsigned long counters
;
749 drm_stat_type_t types
[15];
753 drm_file_list_t files
;
754 drm_magic_head_t magiclist
[DRM_HASH_SIZE
];
756 /* Linked list of mappable regions. Protected by dev_lock */
757 drm_map_list_t maplist
;
759 drm_local_map_t
**context_sareas
;
762 /* DMA queues (contexts) */
763 drm_device_dma_t
*dma
; /* Optional pointer for DMA support */
765 /* Context support */
766 int irq
; /* Interrupt used by board */
767 int irq_enabled
; /* True if the irq handler is enabled */
772 atomic_t context_flag
; /* Context swapping flag */
773 int last_context
; /* Last current context */
775 /* Only used for Radeon */
776 atomic_t vbl_received
;
777 atomic_t vbl_received2
;
779 drm_vbl_sig_list_t vbl_sig_list
;
780 drm_vbl_sig_list_t vbl_sig_list2
;
782 * At load time, disabling the vblank interrupt won't be allowed since
783 * old clients may not call the modeset ioctl and therefore misbehave.
784 * Once the modeset ioctl *has* been called though, we can safely
785 * disable them when unused.
787 int vblank_disable_allowed
;
789 wait_queue_head_t vbl_queue
; /* vbl wait channel */
790 /* vbl wait channel array */
791 wait_queue_head_t
*vbl_queues
;
793 /* number of VBLANK interrupts */
794 /* (driver must alloc the right number of counters) */
795 atomic_t
*_vblank_count
;
796 /* signal list to send on VBLANK */
797 struct drm_vbl_sig_list
*vbl_sigs
;
799 /* number of signals pending on all crtcs */
800 atomic_t vbl_signal_pending
;
801 /* number of users of vblank interrupts per crtc */
802 atomic_t
*vblank_refcount
;
803 /* protected by dev->vbl_lock, used for wraparound handling */
805 /* so we don't call enable more than */
806 atomic_t
*vblank_enabled
;
807 /* Display driver is setting mode */
808 int *vblank_inmodeset
;
809 /* Don't wait while crtc is likely disabled */
811 /* size of vblank counter register */
812 u32 max_vblank_count
;
814 kmutex_t tasklet_lock
;
815 void (*locked_tasklet_func
)(struct drm_device
*dev
);
819 drm_sg_mem_t
*sg
; /* Scatter gather memory */
820 uint32_t *ctx_bitmap
;
822 unsigned int agp_buffer_token
;
823 drm_local_map_t
*agp_buffer_map
;
825 kstat_t
*asoft_ksp
; /* kstat support */
827 /* name Drawable information */
829 unsigned int drw_bitfield_length
;
831 unsigned int drw_info_length
;
832 drm_drawable_info_t
**drw_info
;
834 /* \name GEM information */
836 kmutex_t object_name_lock
;
837 struct idr_list object_name_idr
;
838 atomic_t object_count
;
839 atomic_t object_memory
;
845 uint32_t invalidate_domains
; /* domains pending invalidation */
846 uint32_t flush_domains
; /* domains pending flush */
/* Memory management support (drm_memory.c) */
void	drm_mem_init(void);
void	drm_mem_uninit(void);
/* Trailing `int` presumably is a DRM_MEM_* area tag -- confirm in drm_memory.c. */
void	*drm_alloc(size_t, int);
void	*drm_calloc(size_t, size_t, int);
void	*drm_realloc(void *, size_t, size_t, int);
void	drm_free(void *, size_t, int);
/* Map/unmap a register or framebuffer region for kernel access. */
int	drm_ioremap(drm_device_t *, drm_local_map_t *);
void	drm_ioremapfree(drm_local_map_t *);
865 void drm_core_ioremap(struct drm_local_map
*, struct drm_device
*);
866 void drm_core_ioremapfree(struct drm_local_map
*, struct drm_device
*);
868 void drm_pci_free(drm_device_t
*, drm_dma_handle_t
*);
869 void *drm_pci_alloc(drm_device_t
*, size_t, size_t, dma_addr_t
, int);
871 struct drm_local_map
*drm_core_findmap(struct drm_device
*, unsigned long);
873 int drm_context_switch(drm_device_t
*, int, int);
874 int drm_context_switch_complete(drm_device_t
*, int);
875 int drm_ctxbitmap_init(drm_device_t
*);
876 void drm_ctxbitmap_cleanup(drm_device_t
*);
877 void drm_ctxbitmap_free(drm_device_t
*, int);
878 int drm_ctxbitmap_next(drm_device_t
*);
880 /* Locking IOCTL support (drm_lock.c) */
881 int drm_lock_take(drm_lock_data_t
*, unsigned int);
882 int drm_lock_transfer(drm_device_t
*,
883 drm_lock_data_t
*, unsigned int);
884 int drm_lock_free(drm_device_t
*,
885 volatile unsigned int *, unsigned int);
887 /* Buffer management support (drm_bufs.c) */
888 unsigned long drm_get_resource_start(drm_device_t
*, unsigned int);
889 unsigned long drm_get_resource_len(drm_device_t
*, unsigned int);
890 int drm_initmap(drm_device_t
*, unsigned long, unsigned long,
891 unsigned int, int, int);
892 void drm_rmmap(drm_device_t
*, drm_local_map_t
*);
893 int drm_addmap(drm_device_t
*, unsigned long, unsigned long,
894 drm_map_type_t
, drm_map_flags_t
, drm_local_map_t
**);
895 int drm_order(unsigned long);
897 /* DMA support (drm_dma.c) */
898 int drm_dma_setup(drm_device_t
*);
899 void drm_dma_takedown(drm_device_t
*);
900 void drm_free_buffer(drm_device_t
*, drm_buf_t
*);
901 void drm_reclaim_buffers(drm_device_t
*, drm_file_t
*);
902 #define drm_core_reclaim_buffers drm_reclaim_buffers
904 /* IRQ support (drm_irq.c) */
905 int drm_irq_install(drm_device_t
*);
906 int drm_irq_uninstall(drm_device_t
*);
907 uint_t
drm_irq_handler(DRM_IRQ_ARGS
);
908 void drm_driver_irq_preinstall(drm_device_t
*);
909 void drm_driver_irq_postinstall(drm_device_t
*);
910 void drm_driver_irq_uninstall(drm_device_t
*);
911 int drm_vblank_wait(drm_device_t
*, unsigned int *);
912 void drm_vbl_send_signals(drm_device_t
*);
913 void drm_handle_vblank(struct drm_device
*dev
, int crtc
);
914 u32
drm_vblank_count(struct drm_device
*dev
, int crtc
);
915 int drm_vblank_get(struct drm_device
*dev
, int crtc
);
916 void drm_vblank_put(struct drm_device
*dev
, int crtc
);
917 int drm_vblank_init(struct drm_device
*dev
, int num_crtcs
);
918 void drm_vblank_cleanup(struct drm_device
*dev
);
919 int drm_modeset_ctl(DRM_IOCTL_ARGS
);
921 /* AGP/GART support (drm_agpsupport.c) */
922 int drm_device_is_agp(drm_device_t
*);
923 int drm_device_is_pcie(drm_device_t
*);
924 drm_agp_head_t
*drm_agp_init(drm_device_t
*);
925 void drm_agp_fini(drm_device_t
*);
926 int drm_agp_do_release(drm_device_t
*);
927 void *drm_agp_allocate_memory(size_t pages
,
928 uint32_t type
, drm_device_t
*dev
);
929 int drm_agp_free_memory(agp_allocate_t
*handle
, drm_device_t
*dev
);
930 int drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t
*);
931 int drm_agp_unbind_memory(unsigned long, drm_device_t
*);
932 int drm_agp_bind_pages(drm_device_t
*dev
,
934 unsigned long num_pages
,
935 uint32_t gtt_offset
);
936 int drm_agp_unbind_pages(drm_device_t
*dev
,
937 unsigned long num_pages
,
940 void drm_agp_chipset_flush(struct drm_device
*dev
);
941 void drm_agp_rebind(struct drm_device
*dev
);
943 /* kstat support (drm_kstats.c) */
944 int drm_init_kstats(drm_device_t
*);
945 void drm_fini_kstats(drm_device_t
*);
947 /* Scatter Gather Support (drm_scatter.c) */
948 void drm_sg_cleanup(drm_device_t
*, drm_sg_mem_t
*);
950 /* ATI PCIGART support (ati_pcigart.c) */
951 int drm_ati_pcigart_init(drm_device_t
*, drm_ati_pcigart_info
*);
952 int drm_ati_pcigart_cleanup(drm_device_t
*, drm_ati_pcigart_info
*);
954 /* Locking IOCTL support (drm_drv.c) */
955 int drm_lock(DRM_IOCTL_ARGS
);
956 int drm_unlock(DRM_IOCTL_ARGS
);
957 int drm_version(DRM_IOCTL_ARGS
);
958 int drm_setversion(DRM_IOCTL_ARGS
);
959 /* Cache management (drm_cache.c) */
960 void drm_clflush_pages(caddr_t
*pages
, unsigned long num_pages
);
962 /* Misc. IOCTL support (drm_ioctl.c) */
963 int drm_irq_by_busid(DRM_IOCTL_ARGS
);
964 int drm_getunique(DRM_IOCTL_ARGS
);
965 int drm_setunique(DRM_IOCTL_ARGS
);
966 int drm_getmap(DRM_IOCTL_ARGS
);
967 int drm_getclient(DRM_IOCTL_ARGS
);
968 int drm_getstats(DRM_IOCTL_ARGS
);
969 int drm_noop(DRM_IOCTL_ARGS
);
971 /* Context IOCTL support (drm_context.c) */
972 int drm_resctx(DRM_IOCTL_ARGS
);
973 int drm_addctx(DRM_IOCTL_ARGS
);
974 int drm_modctx(DRM_IOCTL_ARGS
);
975 int drm_getctx(DRM_IOCTL_ARGS
);
976 int drm_switchctx(DRM_IOCTL_ARGS
);
977 int drm_newctx(DRM_IOCTL_ARGS
);
978 int drm_rmctx(DRM_IOCTL_ARGS
);
979 int drm_setsareactx(DRM_IOCTL_ARGS
);
980 int drm_getsareactx(DRM_IOCTL_ARGS
);
982 /* Drawable IOCTL support (drm_drawable.c) */
983 int drm_adddraw(DRM_IOCTL_ARGS
);
984 int drm_rmdraw(DRM_IOCTL_ARGS
);
985 int drm_update_draw(DRM_IOCTL_ARGS
);
987 /* Authentication IOCTL support (drm_auth.c) */
988 int drm_getmagic(DRM_IOCTL_ARGS
);
989 int drm_authmagic(DRM_IOCTL_ARGS
);
990 int drm_remove_magic(drm_device_t
*, drm_magic_t
);
991 drm_file_t
*drm_find_file(drm_device_t
*, drm_magic_t
);
992 /* Buffer management support (drm_bufs.c) */
993 int drm_addmap_ioctl(DRM_IOCTL_ARGS
);
994 int drm_rmmap_ioctl(DRM_IOCTL_ARGS
);
995 int drm_addbufs_ioctl(DRM_IOCTL_ARGS
);
996 int drm_infobufs(DRM_IOCTL_ARGS
);
997 int drm_markbufs(DRM_IOCTL_ARGS
);
998 int drm_freebufs(DRM_IOCTL_ARGS
);
999 int drm_mapbufs(DRM_IOCTL_ARGS
);
1001 /* DMA support (drm_dma.c) */
1002 int drm_dma(DRM_IOCTL_ARGS
);
1004 /* IRQ support (drm_irq.c) */
1005 int drm_control(DRM_IOCTL_ARGS
);
1006 int drm_wait_vblank(DRM_IOCTL_ARGS
);
1008 /* AGP/GART support (drm_agpsupport.c) */
1009 int drm_agp_acquire(DRM_IOCTL_ARGS
);
1010 int drm_agp_release(DRM_IOCTL_ARGS
);
1011 int drm_agp_enable(DRM_IOCTL_ARGS
);
1012 int drm_agp_info(DRM_IOCTL_ARGS
);
1013 int drm_agp_alloc(DRM_IOCTL_ARGS
);
1014 int drm_agp_free(DRM_IOCTL_ARGS
);
1015 int drm_agp_unbind(DRM_IOCTL_ARGS
);
1016 int drm_agp_bind(DRM_IOCTL_ARGS
);
1018 /* Scatter Gather Support (drm_scatter.c) */
1019 int drm_sg_alloc(DRM_IOCTL_ARGS
);
1020 int drm_sg_free(DRM_IOCTL_ARGS
);
1023 struct drm_mm_node
*drm_mm_get_block(struct drm_mm_node
*parent
,
1024 unsigned long size
, unsigned alignment
);
1025 struct drm_mm_node
*drm_mm_search_free(const struct drm_mm
*mm
,
1027 unsigned alignment
, int best_match
);
1029 extern void drm_mm_clean_ml(const struct drm_mm
*mm
);
1030 extern int drm_debug_flag
;
1032 /* We add function to support DRM_DEBUG,DRM_ERROR,DRM_INFO */
1033 extern void drm_debug(const char *fmt
, ...);
1034 extern void drm_error(const char *fmt
, ...);
1035 extern void drm_info(const char *fmt
, ...);
1038 #define DRM_DEBUG if (drm_debug_flag >= 2) drm_debug
1039 #define DRM_INFO if (drm_debug_flag >= 1) drm_info
1041 #define DRM_DEBUG(...)
1042 #define DRM_INFO(...)
1045 #define DRM_ERROR drm_error
1048 #define MAX_INSTNUMS 16
1050 extern int drm_dev_to_instance(dev_t
);
1051 extern int drm_dev_to_minor(dev_t
);
1052 extern void *drm_supp_register(dev_info_t
*, drm_device_t
*);
1053 extern int drm_supp_unregister(void *);
1055 extern int drm_open(drm_device_t
*, drm_cminor_t
*, int, int, cred_t
*);
1056 extern int drm_close(drm_device_t
*, int, int, int, cred_t
*);
1057 extern int drm_attach(drm_device_t
*);
1058 extern int drm_detach(drm_device_t
*);
1059 extern int drm_probe(drm_device_t
*, drm_pci_id_list_t
*);
1061 extern int drm_pci_init(drm_device_t
*);
1062 extern void drm_pci_end(drm_device_t
*);
1063 extern int pci_get_info(drm_device_t
*, int *, int *, int *);
1064 extern int pci_get_irq(drm_device_t
*);
1065 extern int pci_get_vendor(drm_device_t
*);
1066 extern int pci_get_device(drm_device_t
*);
1068 extern struct drm_drawable_info
*drm_get_drawable_info(drm_device_t
*,
1070 /* File Operations helpers (drm_fops.c) */
1071 extern drm_file_t
*drm_find_file_by_proc(drm_device_t
*, cred_t
*);
1072 extern drm_cminor_t
*drm_find_file_by_minor(drm_device_t
*, int);
1073 extern int drm_open_helper(drm_device_t
*, drm_cminor_t
*, int, int,
1076 /* Graphics Execution Manager library functions (drm_gem.c) */
1077 int drm_gem_init(struct drm_device
*dev
);
1078 void drm_gem_object_free(struct drm_gem_object
*obj
);
1079 struct drm_gem_object
*drm_gem_object_alloc(struct drm_device
*dev
,
1081 void drm_gem_object_handle_free(struct drm_gem_object
*obj
);
1083 void drm_gem_object_reference(struct drm_gem_object
*obj
);
1084 void drm_gem_object_unreference(struct drm_gem_object
*obj
);
1086 int drm_gem_handle_create(struct drm_file
*file_priv
,
1087 struct drm_gem_object
*obj
,
1089 void drm_gem_object_handle_reference(struct drm_gem_object
*obj
);
1091 void drm_gem_object_handle_unreference(struct drm_gem_object
*obj
);
1093 struct drm_gem_object
*drm_gem_object_lookup(struct drm_file
*filp
,
1095 int drm_gem_close_ioctl(DRM_IOCTL_ARGS
);
1096 int drm_gem_flink_ioctl(DRM_IOCTL_ARGS
);
1097 int drm_gem_open_ioctl(DRM_IOCTL_ARGS
);
1098 void drm_gem_open(struct drm_file
*file_private
);
1099 void drm_gem_release(struct drm_device
*dev
, struct drm_file
*file_private
);
1102 #endif /* _DRMP_H */