drm: Consolidate memory allocation types
sys/dev/drm/radeon/radeon_cs.c
/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_cs.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt);
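
/*
 * Build the relocation list for a command stream: each 4-dword entry in
 * the RELOCS chunk is resolved to a GEM object (duplicate handles reuse
 * the earlier entry), queued on the parser's validation list, and the
 * whole list is then validated by radeon_bo_list_validate().
 */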
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each relocs use 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kmalloc(p->nrelocs * sizeof(void *), M_DRM,
				M_ZERO | M_WAITOK);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kmalloc(p->nrelocs * sizeof(struct radeon_cs_reloc),
			    M_DRM, M_ZERO | M_WAITOK);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		} else
			p->relocs[i].handle = 0;
	}
	return radeon_bo_list_validate(&p->validated);
}
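
/*
 * Translate the ring id (and optional priority) supplied by userspace in
 * the FLAGS chunk into a hardware ring index, falling back to rings that
 * actually exist on the current ASIC family.
 */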
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_R600) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	}
	return 0;
}
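
/*
 * Make the IB wait on @fence before executing, keeping only the later of
 * the fences already recorded for that ring.
 */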
static void radeon_cs_sync_to(struct radeon_cs_parser *p,
			      struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	other = p->ib.sync_to[fence->ring];
	p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
}
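
/*
 * Make the IB wait on the last fence of every BO referenced by the
 * relocation list, so users of those buffers on other rings are ordered
 * correctly.
 */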
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i;

	for (i = 0; i < p->nrelocs; i++) {
		if (!p->relocs[i].robj)
			continue;

		radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
	}
}
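
/*
 * Copy the chunk descriptors in from userspace, locate the IB, CONST_IB,
 * RELOCS and FLAGS chunks, select the target ring and, on the non-VM
 * path, prepare the page-copy state (bounce pages when AGP is used).
 */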
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kmalloc(cs->num_chunks * sizeof(uint64_t),
				  M_DRM, M_ZERO | M_WAITOK);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kmalloc(p->nchunks * sizeof(struct radeon_cs_chunk),
			    M_DRM, M_ZERO | M_WAITOK);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, M_DRM,
						     M_WAITOK);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
				p->cs_flags = p->chunks[i].kdata[0];
				if (p->chunks[i].length_dw > 1)
					ring = p->chunks[i].kdata[1];
				if (p->chunks[i].length_dw > 2)
					priority = (s32)p->chunks[i].kdata[2];
			}
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		/* we only support VM on SI+ */
		if ((p->rdev->family >= CHIP_TAHITI) &&
		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
			DRM_ERROR("VM required on SI+!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;
	}

	/* deal with non-vm */
	if ((p->chunk_ib_idx != -1) &&
	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
			DRM_ERROR("cs IB too big: %d\n",
				  p->chunks[p->chunk_ib_idx].length_dw);
			return -EINVAL;
		}
		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE,
								      M_DRM,
								      M_WAITOK);
			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE,
								      M_DRM,
								      M_WAITOK);
			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
				drm_free(p->chunks[p->chunk_ib_idx].kpage[0],
					 M_DRM);
				drm_free(p->chunks[p->chunk_ib_idx].kpage[1],
					 M_DRM);
				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
				return -ENOMEM;
			}
		}
		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
		p->chunks[p->chunk_ib_idx].last_page_index =
			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
	}

	return 0;
}

/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers; otherwise just free the
 * memory used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error) {
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib.fence);
	} else {
		ttm_eu_backoff_reservation(&parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	drm_free(parser->track, M_DRM);
	drm_free(parser->relocs, M_DRM);
	drm_free(parser->relocs_ptr, M_DRM);
	for (i = 0; i < parser->nchunks; i++) {
		drm_free(parser->chunks[i].kdata, M_DRM);
		if ((parser->rdev->flags & RADEON_IS_AGP)) {
			drm_free(parser->chunks[i].kpage[0], M_DRM);
			drm_free(parser->chunks[i].kpage[1], M_DRM);
		}
	}
	drm_free(parser->chunks, M_DRM);
	drm_free(parser->chunks_array, M_DRM);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}
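
/*
 * Submit a command stream on the non-VM path: allocate an IB, run the
 * per-ASIC packet parser over the user data, sync with the other rings
 * and schedule the IB.
 */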
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  NULL, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_finish_pages(parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	radeon_cs_sync_rings(parser);
	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}
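
/*
 * Update the VM page table entries for the ring temporary BO and for
 * every BO on the parser's validation list before the IB is scheduled.
 */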
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = parser->rdev;
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
	if (r) {
		return r;
	}
	list_for_each_entry(lobj, &parser->validated, tv.head) {
		bo = lobj->bo;
		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
		if (r) {
			return r;
		}
	}
	return 0;
}
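
/*
 * Submit a command stream on the VM path: copy the optional CONST_IB
 * (SI+) and the IB in from userspace, have the ring-level parser check
 * them, update the VM page tables under the VM locks, schedule the IB and
 * fence the VM on success.
 */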
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
		r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
				  vm, ib_chunk->length_dw * 4);
		if (r) {
			DRM_ERROR("Failed to get const ib !\n");
			return r;
		}
		parser->const_ib.is_const_ib = true;
		parser->const_ib.length_dw = ib_chunk->length_dw;
		/* Copy the packet into the IB */
		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
				       ib_chunk->length_dw * 4)) {
			return -EFAULT;
		}
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
		return -EINVAL;
	}
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	/* Copy the packet into the IB */
	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
			       ib_chunk->length_dw * 4)) {
		return -EFAULT;
	}
	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	lockmgr(&rdev->vm_manager.lock, LK_EXCLUSIVE);
	lockmgr(&vm->mutex, LK_EXCLUSIVE);
	r = radeon_vm_alloc_pt(rdev, vm);
	if (r) {
		goto out;
	}
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	radeon_cs_sync_rings(parser);
	radeon_cs_sync_to(parser, vm->fence);
	radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	}

	if (!r) {
		radeon_vm_fence(rdev, vm, parser->ib.fence);
	}

out:
	radeon_vm_add_to_lru(rdev, vm);
	lockmgr(&vm->mutex, LK_RELEASE);
	lockmgr(&rdev->vm_manager.lock, LK_RELEASE);
	return r;
}
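
/*
 * Treat -EDEADLK from submission as a GPU lockup: reset the GPU and
 * return -EAGAIN so userspace retries the ioctl.
 */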
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
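
/*
 * KMS command submission ioctl: under the exclusive lock, initialize the
 * parser, resolve relocations and hand the IB to the non-VM or VM
 * submission path, cleaning up and handling lockups on the way out.
 */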
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	lockmgr(&rdev->exclusive_lock, LK_EXCLUSIVE);
	if (!rdev->accel_working) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r);
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
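
/*
 * Copy any IB pages that radeon_cs_update_pages() has not pulled in yet,
 * from last_copied_page + 1 up to the final (possibly partial) page,
 * straight into the IB.
 */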
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       (char *)ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}
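
/*
 * Make IB page @pg_idx available to the parser.  Pages that are skipped
 * over are copied directly into the IB; the requested page lands in one
 * of the two cached kpage slots, which on non-AGP setups (copy1) simply
 * point into the IB itself.  Returns the kpage slot used, or records
 * -EFAULT in p->parser_error if a user copy fails.
 */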
static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;
	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
		false : true;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       (char *)ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
	if (copy1)
		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       (char *)ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB for non single case */
	if (!copy1)
		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}
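
/*
 * Return IB dword @idx, pulling in the containing page through
 * radeon_cs_update_pages() if it is not already cached in a kpage slot.
 */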
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}