#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#define I915_CMD_HASH_ORDER 9

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
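/*
 * Illustrative only: a minimal sketch of how free ring space can be computed
 * while honoring the reservation above, i.e. always keeping
 * I915_RING_FREE_SPACE bytes between tail and head so the cacheline
 * restriction quoted above is never violated. The helper name and parameters
 * are hypothetical, not part of this header; the real computation lives in
 * the ring code and works on struct intel_ringbuffer fields.
 */
static inline int example_ring_space(u32 head, u32 tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);

	if (space < 0)
		space += size;	/* tail has wrapped past head */
	return space;
}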
struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
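/*
 * Illustrative only: these accessors follow the usual i915 convention that
 * I915_READ/I915_WRITE expand against a dev_priv in scope, so callers look
 * roughly like this (hypothetical) ring-stop sketch:
 *
 *	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 *	I915_WRITE_CTL(ring, 0);	// disable the ring
 *	I915_WRITE_HEAD(ring, 0);
 *	ring->write_tail(ring, 0);
 */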
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	enum intel_ring_hangcheck_action action;
};
struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	u32 head;
	u32 tail;
	int size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
struct intel_engine_cs {
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;
	struct		drm_device *dev;
	struct intel_ringbuffer *buffer;

	struct intel_hw_status_page status_page;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void		(*irq_put)(struct intel_engine_cs *ring);

	int		(*init)(struct intel_engine_cs *ring);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_engine_cs *ring,
				  u32	invalidate_domains,
				  u32	flush_domains);
	int		(*add_request)(struct intel_engine_cs *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_engine_cs *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_engine_cs *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
					       u64 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_engine_cs *ring);
	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		struct {
			/* our mbox written by others */
			u32		wait[I915_NUM_RINGS];
			/* mboxes this ring signals to */
			u32		signal[I915_NUM_RINGS];
		} mbox;

		int	(*sync_to)(struct intel_engine_cs *ring,
				   struct intel_engine_cs *to,
				   u32 seqno);
		int	(*signal)(struct intel_engine_cs *signaller,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
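/*
 * Illustrative only: a sketch of a get_cmd_length_mask() hook. The client
 * field position and the mask values below are simplified examples, not a
 * description of any particular generation's encoding; a real implementation
 * keys off the opcode ranges documented for its ring.
 */
static inline u32 example_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> 29;	/* hypothetical client field in bits 31:29 */

	if (client == 0x0)		/* e.g. MI client: length in bits 5:0 */
		return 0x3F;
	if (client == 0x3)		/* e.g. render client: length in bits 7:0 */
		return 0xFF;

	return 0;			/* unrecognized command: parser rejects it */
}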
static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
	return ring->buffer && ring->buffer->obj;
}
static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}
static inline int
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs,
	 * bcs -> 0 = cs, 1 = vcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
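/*
 * Illustrative only: the returned index skips the waiting ring itself, e.g.
 * (assuming the engines are laid out contiguously in a dev_priv->ring[]
 * array, which is what the pointer arithmetic above relies on):
 *
 *	idx = intel_ring_sync_index(&dev_priv->ring[RCS], &dev_priv->ring[VCS]);
 *	// idx == 0: VCS is the first ring after RCS, modulo I915_NUM_RINGS
 */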
static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}
static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}
/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
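/*
 * Illustrative only: a sketch of how the driver-usage area above is consumed.
 * The breadcrumb seqno for a ring is kept at I915_GEM_HWS_INDEX, so reading
 * it back is a plain status-page load (subject to the lazy_coherency caveat
 * documented on get_seqno above). The helper name is hypothetical.
 */
static inline u32 example_read_hws_seqno(struct intel_engine_cs *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}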
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}

static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
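/*
 * Illustrative only: the canonical emit sequence built from the helpers
 * above. intel_ring_begin() reserves space (wrapping if necessary), each
 * intel_ring_emit() writes one dword at the software tail, and
 * intel_ring_advance() wraps the tail to the ring size; the hardware tail
 * write is a separate step (see __intel_ring_advance() below). The helper
 * and its cmd/operand payload are hypothetical.
 */
static inline int example_emit_two_dwords(struct intel_engine_cs *ring,
					  u32 cmd, u32 operand)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve two dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, operand);
	intel_ring_advance(ring);

	return 0;
}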
void __intel_ring_advance(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
{
	return ring->buffer->tail;
}
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}
static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */