drm/i915/ringbuffer: Drop the redundant dev from the vfunc interface
drivers/gpu/drm/i915/intel_ringbuffer.h
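The ring vfuncs below take only the intel_ring_buffer itself; since the ring already stores its drm_device (ring->dev), passing dev separately was redundant. A rough before/after sketch of one vfunc, where the pre-patch signature is assumed from the commit title rather than shown on this page:

/* before (assumed): each vfunc took the device as well as the ring */
void (*flush)(struct drm_device *dev, struct intel_ring_buffer *ring,
	      u32 invalidate_domains, u32 flush_domains);

/* after: the ring alone is enough; dev remains reachable via ring->dev */
void (*flush)(struct intel_ring_buffer *ring,
	      u32 invalidate_domains, u32 flush_domains);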
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	u32 __iomem	*page_addr;
	unsigned int	gfx_addr;
	struct drm_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
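/*
 * Usage sketch (illustrative, not part of the original header): these
 * wrappers resolve a ring's per-engine register from its mmio_base, so
 * ring code can do, for example,
 *
 *	I915_WRITE_TAIL(ring, ring->tail);
 *	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 *
 * with I915_READ/I915_WRITE coming from i915_drv.h and the register
 * offsets and masks from i915_reg.h.
 */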
struct drm_i915_gem_execbuffer2;
struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RING_RENDER = 0x1,
		RING_BSD = 0x2,
		RING_BLT = 0x4,
	} id;
	u32		mmio_base;
	unsigned long	size;
	void		*virtual_start;
	struct drm_device *dev;
	struct drm_gem_object *gem_object;

	unsigned int	head;
	unsigned int	tail;
	int		space;
	struct intel_hw_status_page status_page;
	u32		irq_gem_seqno;		/* last seqno seen at irq time */
	u32		waiting_gem_seqno;
	int		user_irq_refcount;
	void		(*user_irq_get)(struct intel_ring_buffer *ring);
	void		(*user_irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	void		(*flush)(struct intel_ring_buffer *ring,
				 u32 invalidate_domains,
				 u32 flush_domains);
	u32		(*add_request)(struct intel_ring_buffer *ring,
				       u32 flush_domains);
	u32		(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       struct drm_i915_gem_execbuffer2 *exec,
					       struct drm_clip_rect *cliprects,
					       uint64_t exec_offset);
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list; last_rendering_seqno can
	 * be used to differentiate between the two elements.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	bool outstanding_lazy_request;

	wait_queue_head_t irq_queue;
	drm_local_map_t map;
};
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	return ioread32(ring->status_page.page_addr + reg);
}
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
void intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
void intel_ring_advance(struct intel_ring_buffer *ring);

u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

#endif /* _INTEL_RINGBUFFER_H_ */
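For context, a minimal sketch of how a caller drives the emit API declared above. It assumes the i915 build environment (MI_FLUSH and MI_NOOP come from i915_reg.h); the helper name example_emit_flush is hypothetical and only mirrors the begin/emit/advance pattern used by the ring implementations in intel_ringbuffer.c:

/* Hypothetical caller: reserve space, write dwords, then kick the tail. */
static void example_emit_flush(struct intel_ring_buffer *ring)
{
	intel_ring_begin(ring, 2);		/* reserve two dwords of ring space */
	intel_ring_emit(ring, MI_FLUSH);	/* command dword */
	intel_ring_emit(ring, MI_NOOP);		/* pad to an even dword count */
	intel_ring_advance(ring);		/* write the new tail to hardware */
}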