#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	u32 __iomem	*page_addr;
	unsigned int	gfx_addr;
	struct drm_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
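
/*
 * Usage sketch (illustrative, not part of the original header): the
 * accessors above wrap the per-ring MMIO registers at ring->mmio_base,
 * e.g.
 *
 *	u32 head = I915_READ_HEAD(ring) & HEAD_ADDR;
 *
 * where HEAD_ADDR is assumed to be the head-pointer mask from the
 * driver's register definitions.
 */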

struct drm_i915_gem_execbuffer2;

struct intel_ring_buffer {
	const char	*name;
	u32		mmio_base;
	unsigned long	size;
	void		*virtual_start;
	struct drm_device *dev;
	struct drm_gem_object *gem_object;

	unsigned int	head;
	unsigned int	tail;
	int		space;
	struct intel_hw_status_page status_page;

	u32		irq_gem_seqno;		/* last seqno seen at irq time */
	u32		waiting_gem_seqno;
	int		user_irq_refcount;
	void		(*user_irq_get)(struct intel_ring_buffer *ring);
	void		(*user_irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	void		(*flush)(struct intel_ring_buffer *ring,
				 u32 invalidate_domains,
				 u32 flush_domains);
	u32		(*add_request)(struct intel_ring_buffer *ring,
				       u32 flush_domains);
	u32		(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       struct drm_i915_gem_execbuffer2 *exec,
					       struct drm_clip_rect *cliprects,
					       uint64_t exec_offset);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list; last_rendering_seqno can
	 * be used to differentiate between the two elements.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	bool outstanding_lazy_request;

	wait_queue_head_t irq_queue;
};

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	return ioread32(ring->status_page.page_addr + reg);
}
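
/*
 * Example (sketch, not part of the original header): the GPU writes its
 * most recent breadcrumb seqno into a fixed dword of the status page,
 * so a get_seqno() implementation typically reads
 *
 *	intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *
 * where I915_GEM_HWS_INDEX is assumed to be the dword index defined
 * elsewhere in the driver.
 */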

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
void intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	/* Write one command dword and advance the tail offset past it. */
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
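
/*
 * Usage sketch (illustrative): commands are emitted as dwords between
 * intel_ring_begin() and intel_ring_advance(), e.g.
 *
 *	intel_ring_begin(ring, 2);
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * MI_FLUSH and MI_NOOP are command dwords assumed to be defined in the
 * driver's register headers; intel_ring_begin() reserves space for the
 * given number of dwords.
 */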

u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
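
/*
 * Sketch (illustrative): callers initialize each ring during driver
 * load and propagate failures, e.g.
 *
 *	ret = intel_init_render_ring_buffer(dev);
 *	if (ret)
 *		return ret;
 */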

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

#endif /* _INTEL_RINGBUFFER_H_ */