/* release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/gpu/drm/radeon/r600_cs.c */
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kernel.h>
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "r600_reg_safe.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
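
/*
 * Relocation parsing is dispatched through a function pointer: the KMS
 * path (_mm) looks relocations up in the parser's reloc table, while the
 * legacy UMS path (_nomm, installed by r600_cs_legacy_init() below) reads
 * the GPU offset directly out of the relocation chunk.
 */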

struct r600_cs_track {
	/* configuration we mirror so that we use the same code between kms/ums */
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			sq_config;
	u32			nsamples;
	u32			cb_color_base_last[8];
	struct radeon_bo	*cb_color_bo[8];
	u32			cb_color_bo_offset[8];
	struct radeon_bo	*cb_color_frag_bo[8];
	struct radeon_bo	*cb_color_tile_bo[8];
	u32			cb_color_info[8];
	u32			cb_color_size_idx[8];
	u32			cb_target_mask;
	u32			cb_shader_mask;
	u32			cb_color_size[8];
	u32			vgt_strmout_en;
	u32			vgt_strmout_buffer_en;
	u32			db_depth_control;
	u32			db_depth_info;
	u32			db_depth_size_idx;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_offset;
	struct radeon_bo	*db_bo;
};
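
/*
 * r600_bpe_from_format() maps a CB/texture format to its size in bytes per
 * element, e.g. COLOR_5_6_5 -> 2 and COLOR_8_8_8_8 -> 4. Invalid formats
 * report 16 (the worst case) together with -EINVAL so callers fail safely.
 */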
static inline int r600_bpe_from_format(u32 *bpe, u32 format)
{
	switch (format) {
	case V_038004_COLOR_8:
	case V_038004_COLOR_4_4:
	case V_038004_COLOR_3_3_2:
	case V_038004_FMT_1:
		*bpe = 1;
		break;
	case V_038004_COLOR_16:
	case V_038004_COLOR_16_FLOAT:
	case V_038004_COLOR_8_8:
	case V_038004_COLOR_5_6_5:
	case V_038004_COLOR_6_5_5:
	case V_038004_COLOR_1_5_5_5:
	case V_038004_COLOR_4_4_4_4:
	case V_038004_COLOR_5_5_5_1:
		*bpe = 2;
		break;
	case V_038004_FMT_8_8_8:
		*bpe = 3;
		break;
	case V_038004_COLOR_32:
	case V_038004_COLOR_32_FLOAT:
	case V_038004_COLOR_16_16:
	case V_038004_COLOR_16_16_FLOAT:
	case V_038004_COLOR_8_24:
	case V_038004_COLOR_8_24_FLOAT:
	case V_038004_COLOR_24_8:
	case V_038004_COLOR_24_8_FLOAT:
	case V_038004_COLOR_10_11_11:
	case V_038004_COLOR_10_11_11_FLOAT:
	case V_038004_COLOR_11_11_10:
	case V_038004_COLOR_11_11_10_FLOAT:
	case V_038004_COLOR_2_10_10_10:
	case V_038004_COLOR_8_8_8_8:
	case V_038004_COLOR_10_10_10_2:
	case V_038004_FMT_5_9_9_9_SHAREDEXP:
	case V_038004_FMT_32_AS_8:
	case V_038004_FMT_32_AS_8_8:
		*bpe = 4;
		break;
	case V_038004_COLOR_X24_8_32_FLOAT:
	case V_038004_COLOR_32_32:
	case V_038004_COLOR_32_32_FLOAT:
	case V_038004_COLOR_16_16_16_16:
	case V_038004_COLOR_16_16_16_16_FLOAT:
		*bpe = 8;
		break;
	case V_038004_FMT_16_16_16:
	case V_038004_FMT_16_16_16_FLOAT:
		*bpe = 6;
		break;
	case V_038004_FMT_32_32_32:
	case V_038004_FMT_32_32_32_FLOAT:
		*bpe = 12;
		break;
	case V_038004_COLOR_32_32_32_32:
	case V_038004_COLOR_32_32_32_32_FLOAT:
		*bpe = 16;
		break;
	case V_038004_FMT_GB_GR:
	case V_038004_FMT_BG_RG:
	case V_038004_COLOR_INVALID:
	default:
		*bpe = 16;
		return -EINVAL;
	}
	return 0;
}

static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->db_bo = NULL;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
}
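
/*
 * Note: the 0xFFFFFFFF defaults above look like deliberate worst-case
 * sentinels: until the command stream actually programs an offset or size,
 * bound checks against them fail, so state userspace forgot to emit is
 * caught rather than silently trusted.
 */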

static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
	volatile u32 *ib = p->ib->ptr;

	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
		dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
		return -EINVAL;
	}
	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
			 i, track->cb_color_info[i]);
		return -EINVAL;
	}
	/* pitch is the number of 8x8 tiles per row */
	pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	height = size / (pitch * 8 * bpe);
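	/*
	 * Illustrative numbers: with pitch in units of 8 pixels, one row is
	 * pitch * 8 * bpe bytes. A 1024-wide 32bpp target has pitch = 128,
	 * i.e. 4096 bytes per row, so a 3 MiB bo yields height = 768.
	 */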
	if (height > 8192)
		height = 8192;
	if (height > 7)
		height &= ~0x7;
	switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
		/* technically height & 0x7 */
		break;
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		if (!IS_ALIGNED(height, 8)) {
			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
				 __func__, __LINE__, height);
			return -EINVAL;
		}
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		if (!IS_ALIGNED(height, 8)) {
			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
				 __func__, __LINE__, height);
			return -EINVAL;
		}
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		pitch_align = max((u32)track->nbanks,
				  (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks));
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		if (!IS_ALIGNED((height / 8), track->npipes)) {
			dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
				 __func__, __LINE__, height);
			return -EINVAL;
		}
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}
	/* check offset */
	tmp = height * pitch * 8 * bpe;
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
		return -EINVAL;
	}
	if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
		dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
		return -EINVAL;
	}
	/* limit max tile */
	tmp = (height * pitch * 8) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX(pitch - 1) |
	      S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;
	return 0;
}

static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;
	volatile u32 *ib = p->ib->ptr;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;
	/* we don't support the output buffer yet */
	if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
		return -EINVAL;
	}
	/* check that we have a cb for each enabled target, we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	tmp = track->cb_target_mask;
	for (i = 0; i < 8; i++) {
		if ((tmp >> (i * 4)) & 0xF) {
			/* at least one component is enabled */
			if (track->cb_color_bo[i] == NULL) {
				dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
					 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
				return -EINVAL;
			}
			/* perform rewrite of CB_COLOR[0-7]_SIZE */
			r = r600_cs_track_validate_cb(p, i);
			if (r)
				return r;
		}
	}
	/* Check depth buffer */
	if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	    G_028800_Z_ENABLE(track->db_depth_control)) {
		u32 nviews, bpe, ntiles, pitch, pitch_align, height, size;
		if (track->db_bo == NULL) {
			dev_warn(p->dev, "z/stencil with no depth buffer\n");
			return -EINVAL;
		}
		if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
			dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
			return -EINVAL;
		}
		switch (G_028010_FORMAT(track->db_depth_info)) {
		case V_028010_DEPTH_16:
			bpe = 2;
			break;
		case V_028010_DEPTH_X8_24:
		case V_028010_DEPTH_8_24:
		case V_028010_DEPTH_X8_24_FLOAT:
		case V_028010_DEPTH_8_24_FLOAT:
		case V_028010_DEPTH_32_FLOAT:
			bpe = 4;
			break;
		case V_028010_DEPTH_X24_8_32_FLOAT:
			bpe = 8;
			break;
		default:
			dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			if (!track->db_depth_size_idx) {
				dev_warn(p->dev, "z/stencil buffer size not set\n");
				return -EINVAL;
			}
			tmp = radeon_bo_size(track->db_bo) - track->db_offset;
			tmp = (tmp / bpe) >> 6;
			if (!tmp) {
				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
					 track->db_depth_size, bpe, track->db_offset,
					 radeon_bo_size(track->db_bo));
				return -EINVAL;
			}
			ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
		} else {
			size = radeon_bo_size(track->db_bo);
			pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
			height = size / (pitch * 8 * bpe);
			height &= ~0x7;
			if (!height)
				height = 8;

			switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
			case V_028010_ARRAY_1D_TILED_THIN1:
				pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
				if (!IS_ALIGNED(pitch, pitch_align)) {
					dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
						 __func__, __LINE__, pitch);
					return -EINVAL;
				}
				if (!IS_ALIGNED(height, 8)) {
					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
						 __func__, __LINE__, height);
					return -EINVAL;
				}
				break;
			case V_028010_ARRAY_2D_TILED_THIN1:
				pitch_align = max((u32)track->nbanks,
						  (u32)(((track->group_size / 8) / bpe) * track->nbanks));
				if (!IS_ALIGNED(pitch, pitch_align)) {
					dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
						 __func__, __LINE__, pitch);
					return -EINVAL;
				}
				if (!IS_ALIGNED((height / 8), track->npipes)) {
					dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
						 __func__, __LINE__, height);
					return -EINVAL;
				}
				break;
			default:
				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
					 G_028010_ARRAY_MODE(track->db_depth_info),
					 track->db_depth_info);
				return -EINVAL;
			}
			if (!IS_ALIGNED(track->db_offset, track->group_size)) {
				dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset);
				return -EINVAL;
			}
			ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
			/* each tile is 8x8 pixels, hence ntiles * 64 * bpe bytes per view */
			tmp = ntiles * bpe * 64 * nviews;
			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
					 radeon_bo_size(track->db_bo));
				return -EINVAL;
			}
		}
	}
	return 0;
}

/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index into the ib at which the packet starts
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 */
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
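
/*
 * For reference (following the CP_PACKET_* macros used above): a command
 * processor packet header encodes its type in bits [31:30] and a dword
 * count in bits [29:16]; PACKET0 headers carry the base register in the
 * low bits and PACKET3 headers carry the opcode in bits [15:8].
 */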

/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resolved relocation
 *
 * Check that the next packet is a relocation packet3 (NOP), and if so
 * fetch the relocation entry it refers to from the parser's relocation
 * table.
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be a reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resolved relocation
 *
 * Check that the next packet is a relocation packet3 (NOP), and if so
 * assemble the GPU offset directly from the relocation chunk (legacy,
 * non-KMS path).
 */
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = p->relocs;
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}
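
/*
 * Note the layout consumed above: each entry in the legacy relocation
 * chunk is four dwords, with dword 0 holding the low 32 bits of the GPU
 * offset and dword 3 the high 32 bits.
 */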

/**
 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @p:		parser structure holding parsing context.
 *
 * Return 1 if the next packet in the command stream is a relocation
 * packet3 (NOP), 0 otherwise.
 */
static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return 0;
	}
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		return 0;
	}
	return 1;
}

/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p:		parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		r = -EINVAL;
		return r;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		r = -EINVAL;
		return r;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		r = -EINVAL;
		return r;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		r = -EINVAL;
		return r;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;
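
	/*
	 * h_idx now points at the PACKET0 VLINE_START_END header; the
	 * WAIT_REG_MEM packet occupies the seven dwords that follow it,
	 * and the NOP reloc carrying the crtc_id comes after that, which
	 * is why crtc_id is fetched at h_idx + 2 + 7 + 1 below.
	 */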
	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);

	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib[h_idx] = header;
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	}
out:
	return r;
}

static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}
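
/*
 * A PACKET0 with count N writes N + 1 consecutive dwords, so the loop
 * above steps the register address by 4 bytes per payload dword and checks
 * each register it would touch.
 */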

/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @p:		parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe
 * this function will test it against a list of registers needing
 * special handling.
 */
static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
	u32 m, i, tmp, *ib;
	int r;
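
	/*
	 * r600_reg_safe_bm is a bitmap over the register space: each u32
	 * covers 32 dword registers (128 bytes), hence the reg >> 7 word
	 * index and the (reg >> 2) & 31 bit index below. A clear bit means
	 * the register is safe to write as-is; a set bit sends it through
	 * the special-case handling in the switch.
	 */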
	i = (reg >> 7);
	if (i > last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib->ptr;
	switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
		break;
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case R_028010_DB_DEPTH_INFO:
		if (r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
			} else {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			}
		} else
			track->db_depth_info = radeon_get_ib_value(p, idx);
		break;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		break;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		break;
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->nsamples = 1 << tmp;
		break;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			}
		} else {
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		}
		break;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		break;
		/* These registers were added late; there is userspace
		 * which does provide relocations for them but sets a
		 * 0 offset. In order to avoid breaking old userspace
		 * we detect this and set the address to point to the last
		 * CB_COLOR0_BASE. Note that if userspace doesn't set
		 * CB_COLOR0_BASE before these registers we will report an
		 * error. Old userspace always set CB_COLOR0_BASE
		 * before any of these.
		 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			ib[idx] = track->cb_color_base_last[tmp];
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_frag_bo[tmp] = reloc->robj;
		}
		break;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			ib[idx] = track->cb_color_base_last[tmp];
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_tile_bo[tmp] = reloc->robj;
		}
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case DB_DEPTH_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		break;
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

static inline unsigned minify(unsigned size, unsigned levels)
{
	size = size >> levels;
	if (size < 1)
		size = 1;
	return size;
}
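
/* e.g. minify(256, 3) == 32; the result is clamped so it never drops below 1 */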

static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
			      unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
			      unsigned pitch_align,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level, face;
	unsigned width, height, depth, rowstride, size;

	w0 = minify(w0, 0);
	h0 = minify(h0, 0);
	d0 = minify(d0, 0);
	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = minify(w0, i);
		height = minify(h0, i);
		depth = minify(d0, i);
		for(face = 0; face < nfaces; face++) {
			rowstride = ALIGN((width * bpe), pitch_align);
			size = height * rowstride * depth;
			offset += size;
			offset = (offset + 0x1f) & ~0x1f;
		}
	}
	*l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
	*mipmap_size = offset;
	if (!nlevels)
		*mipmap_size = *l0_size;
	if (!blevel)
		*mipmap_size -= *l0_size;
}
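
/*
 * Worked example (one face, blevel 0, 3 levels, 64x64x1, bpe 4, pitch_align
 * 256 bytes): each level's row stride aligns up to 256, giving level sizes
 * 16384 + 8192 + 4096 = 28672 bytes; l0_size is 16384 and, since blevel is
 * 0, mipmap_size comes out as 28672 - 16384 = 12288.
 */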

/**
 * r600_check_texture_resource() - check if a texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 * @tiling_flags:	tiling flags from the relocation
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
					      struct radeon_bo *texture,
					      struct radeon_bo *mipmap,
					      u32 tiling_flags)
{
	struct r600_cs_track *track = p->track;
	u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
	u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	word0 = radeon_get_ib_value(p, idx + 0);
	if (tiling_flags & RADEON_TILING_MACRO)
		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
	else if (tiling_flags & RADEON_TILING_MICRO)
		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	word1 = radeon_get_ib_value(p, idx + 1);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	nfaces = 1;
	switch (G_038000_DIM(word0)) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_MSAA:
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, G_038004_DATA_FORMAT(word1));
		return -EINVAL;
	}

	pitch = G_038000_PITCH(word0) + 1;
	switch (G_038000_TILE_MODE(word0)) {
	case V_038000_ARRAY_LINEAR_GENERAL:
		pitch_align = 1;
		break;
	case V_038000_ARRAY_LINEAR_ALIGNED:
		pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		break;
	case V_038000_ARRAY_1D_TILED_THIN1:
		pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8;
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		break;
	case V_038000_ARRAY_2D_TILED_THIN1:
		pitch_align = max((u32)track->nbanks,
				  (u32)(((track->group_size / 8) / bpe) * track->nbanks));
		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
				 __func__, __LINE__, pitch);
			return -EINVAL;
		}
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
			 G_038000_TILE_MODE(word0), word0);
		return -EINVAL;
	}

	word0 = radeon_get_ib_value(p, idx + 4);
	word1 = radeon_get_ib_value(p, idx + 5);
	blevel = G_038010_BASE_LEVEL(word0);
	nlevels = G_038014_LAST_LEVEL(word1);
	r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
			  (pitch_align * bpe),
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	word0 = radeon_get_ib_value(p, idx + 2) << 8;
	if ((l0_size + word0) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
			 w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	word0 = radeon_get_ib_value(p, idx + 3) << 8;
	if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
		  w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
	}
	return 0;
}

static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap, reloc->lobj.tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
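
/*
 * The SET_*_REG cases above share one range-check pattern: the packet's
 * first dword is a register offset in dwords, so start_reg =
 * (idx_value << 2) + BLOCK_OFFSET, and a count of N dwords touches
 * registers up to end_reg = start_reg + 4 * N - 4, both of which must
 * stay inside the block's [OFFSET, END) window.
 */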

int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	kfree(p->track);
	p->track = NULL;
	return 0;
}

static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

/**
 * r600_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers; otherwise just free the
 * memory used by the parsing context.
 */
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}

int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
		   unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib fake_ib;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.ib = &fake_ib;
	parser.track = track;
	fake_ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}

void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}