staging: gma500: pull mrst firmware stuff into its own header
[linux-2.6/btrfs-unstable.git] / drivers / staging / gma500 / psb_ttm_placement_user.c
blob 272b397982edbe5d1ffe9ab4c813e5607379d893
/**************************************************************************
 *
 * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#include "psb_ttm_placement_user.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "psb_ttm_userobj_api.h"
#include "ttm/ttm_lock.h"
#include <linux/slab.h>
#include <linux/sched.h>

struct ttm_bo_user_object {
	struct ttm_base_object base;
	struct ttm_buffer_object bo;
};

static size_t pl_bo_size;

static uint32_t psb_busy_prios[] = {
	TTM_PL_TT,
	TTM_PL_PRIV0, /* CI */
	TTM_PL_PRIV2, /* RAR */
	TTM_PL_PRIV1, /* DRM_PSB_MEM_MMU */
	TTM_PL_SYSTEM
};

static const struct ttm_placement default_placement = {
	0, 0, 0, NULL, 5, psb_busy_prios
};
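
/*
 * ttm_pl_size - Kernel-side accounting size for a buffer object spanning
 * @num_pages pages: the global bo size plus two page-pointer arrays.
 * Also caches pl_bo_size on first use.
 */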
static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
{
	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(pl_bo_size == 0)) {
		pl_bo_size = bdev->glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct ttm_bo_user_object));
	}

	return bdev->glob->ttm_bo_size + 2 * page_array_size;
}
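
/*
 * ttm_bo_user_lookup - Look up a user buffer object by handle, taking a
 * reference on its base object. Returns NULL if the handle is invalid or
 * does not refer to a buffer object.
 */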
static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
						     *tfile, uint32_t handle)
{
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return NULL;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return NULL;
	}

	return container_of(base, struct ttm_bo_user_object, base);
}
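
/*
 * ttm_buffer_object_lookup - Resolve a handle to a referenced
 * ttm_buffer_object. The base object reference taken by the lookup is
 * dropped again; the returned bo carries its own reference.
 */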
struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
						   *tfile, uint32_t handle)
{
	struct ttm_bo_user_object *user_bo;
	struct ttm_base_object *base;

	user_bo = ttm_bo_user_lookup(tfile, handle);
	if (unlikely(user_bo == NULL))
		return NULL;

	(void)ttm_bo_reference(&user_bo->bo);
	base = &user_bo->base;
	ttm_base_object_unref(&base);
	return &user_bo->bo;
}
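
/*
 * ttm_bo_user_destroy - Final destructor: return the accounted size to the
 * memory global and free the embedding ttm_bo_user_object.
 */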
static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_bo_user_object *user_bo =
	    container_of(bo, struct ttm_bo_user_object, bo);

	ttm_mem_global_free(bo->glob->mem_glob, bo->acc_size);
	kfree(user_bo);
}
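
/*
 * ttm_bo_user_release - Base object release callback: drop the buffer
 * object reference held through the base object.
 */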
static void ttm_bo_user_release(struct ttm_base_object **p_base)
{
	struct ttm_bo_user_object *user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	user_bo = container_of(base, struct ttm_bo_user_object, base);
	bo = &user_bo->bo;
	ttm_bo_unref(&bo);
}
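
/*
 * ttm_bo_user_ref_release - Drop a per-file reference of the given type;
 * only TTM_REF_SYNCCPU_WRITE references are expected here.
 */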
static void ttm_bo_user_ref_release(struct ttm_base_object *base,
				    enum ttm_ref_type ref_type)
{
	struct ttm_bo_user_object *user_bo =
	    container_of(base, struct ttm_bo_user_object, base);
	struct ttm_buffer_object *bo = &user_bo->bo;

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(bo);
		break;
	default:
		BUG();
	}
}
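
/*
 * ttm_pl_fill_rep - Fill the ioctl reply with the buffer object's current
 * placement, size, GPU offset, map handle and user handle.
 */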
static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
			    struct ttm_pl_rep *rep)
{
	struct ttm_bo_user_object *user_bo =
	    container_of(bo, struct ttm_bo_user_object, bo);

	rep->gpu_offset = bo->offset;
	rep->bo_size = bo->num_pages << PAGE_SHIFT;
	rep->map_handle = bo->addr_space_offset;
	rep->placement = bo->mem.placement;
	rep->handle = user_bo->base.hash.key;
	rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg;
}

/* FIXME Copy from upstream TTM */
static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return glob->ttm_bo_size + 2 * page_array_size;
}

/* FIXME Copy from upstream TTM "ttm_bo_create", upstream TTM does not
   export this, so copy it here */
static int ttm_bo_create_private(struct ttm_bo_device *bdev,
				 unsigned long size,
				 enum ttm_bo_type type,
				 struct ttm_placement *placement,
				 uint32_t page_alignment,
				 unsigned long buffer_start,
				 bool interruptible,
				 struct file *persistant_swap_storage,
				 struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	int ret;

	size_t acc_size =
	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  buffer_start, interruptible,
			  persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
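
/*
 * psb_ttm_bo_check_placement - Reject placement flags that only root may
 * set: TTM_PL_FLAG_NO_EVICT requires CAP_SYS_ADMIN in both the normal and
 * the busy placement lists.
 */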
int psb_ttm_bo_check_placement(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
				       "modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	for (i = 0; i < placement->num_busy_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->busy_placement[i]
			    & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
				       "modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}
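
/*
 * ttm_buffer_object_create - Kernel-internal helper that creates a buffer
 * object with a single-entry placement built from @flags, defaulting to
 * write-combined/uncached when no caching flags are given.
 */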
int ttm_buffer_object_create(struct ttm_bo_device *bdev,
			     unsigned long size,
			     enum ttm_bo_type type,
			     uint32_t flags,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     bool interruptible,
			     struct file *persistant_swap_storage,
			     struct ttm_buffer_object **p_bo)
{
	struct ttm_placement placement = default_placement;
	int ret;

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;

	placement.num_placement = 1;
	placement.placement = &flags;

	ret = ttm_bo_create_private(bdev,
				    size,
				    type,
				    &placement,
				    page_alignment,
				    buffer_start,
				    interruptible,
				    persistant_swap_storage,
				    p_bo);

	return ret;
}
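
/*
 * ttm_pl_create_ioctl - Create a device-backed buffer object on behalf of
 * user space, register it as a base object and return handle, size and
 * placement in the reply.
 */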
int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
			struct ttm_bo_device *bdev,
			struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_arg *arg = data;
	struct ttm_pl_create_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;
	size_t acc_size =
	    ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	bo = &user_bo->bo;
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}

	placement.num_placement = 1;
	placement.placement = &flags;

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;

	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_device, &placement,
			  req->page_alignment, 0, true,
			  NULL, acc_size, &ttm_bo_user_destroy);
	ttm_read_unlock(lock);

	/*
	 * Note that the ttm_buffer_object_init function
	 * would've called the destroy function on failure!!
	 */

	if (unlikely(ret != 0))
		goto out;

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unref(&bo);
out:
	return 0;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
}
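
/*
 * ttm_pl_ub_create_ioctl - Like ttm_pl_create_ioctl, but creates a buffer
 * object of type ttm_bo_type_user backed by the user address range given
 * in the request.
 */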
int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
			   struct ttm_bo_device *bdev,
			   struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_ub_arg *arg = data;
	struct ttm_pl_create_ub_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;
	size_t acc_size =
	    ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}
	bo = &user_bo->bo;

	placement.num_placement = 1;
	placement.placement = &flags;

	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  ttm_bo_type_user,
			  &placement,
			  req->page_alignment,
			  req->user_address,
			  true,
			  NULL,
			  acc_size,
			  &ttm_bo_user_destroy);

	/*
	 * Note that the ttm_buffer_object_init function
	 * would've called the destroy function on failure!!
	 */
	ttm_read_unlock(lock);
	if (unlikely(ret != 0))
		goto out;

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unref(&bo);
out:
	return 0;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
}
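
/*
 * ttm_pl_reference_ioctl - Add a TTM_REF_USAGE reference to an existing
 * buffer object handle and return its current state in the reply.
 */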
int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
{
	union ttm_pl_reference_arg *arg = data;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_bo_user_object *user_bo;
	struct ttm_buffer_object *bo;
	struct ttm_base_object *base;
	int ret;

	user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
	if (unlikely(user_bo == NULL)) {
		printk(KERN_ERR "Could not reference buffer object.\n");
		return -EINVAL;
	}

	bo = &user_bo->bo;
	ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR
		       "Could not add a reference to buffer object.\n");
		goto out;
	}

	ttm_pl_fill_rep(bo, rep);

out:
	base = &user_bo->base;
	ttm_base_object_unref(&base);
	return ret;
}
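
/*
 * ttm_pl_unref_ioctl - Drop a TTM_REF_USAGE reference previously taken on
 * the buffer object handle.
 */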
int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
{
	struct ttm_pl_reference_req *arg = data;

	return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
}
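
/*
 * ttm_pl_synccpu_ioctl - Grab or release CPU write access to a buffer
 * object. A grab takes a TTM_REF_SYNCCPU_WRITE reference on the base
 * object so the grab is tracked per file.
 */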
int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
{
	struct ttm_pl_synccpu_arg *arg = data;
	struct ttm_bo_user_object *user_bo;
	struct ttm_buffer_object *bo;
	struct ttm_base_object *base;
	bool existed;
	int ret;

	switch (arg->op) {
	case TTM_PL_SYNCCPU_OP_GRAB:
		user_bo = ttm_bo_user_lookup(tfile, arg->handle);
		if (unlikely(user_bo == NULL)) {
			printk(KERN_ERR
			       "Could not find buffer object for synccpu.\n");
			return -EINVAL;
		}
		bo = &user_bo->bo;
		base = &user_bo->base;
		ret = ttm_bo_synccpu_write_grab(bo,
						arg->access_mode &
						TTM_PL_SYNCCPU_MODE_NO_BLOCK);
		if (unlikely(ret != 0)) {
			ttm_base_object_unref(&base);
			goto out;
		}
		ret = ttm_ref_object_add(tfile, &user_bo->base,
					 TTM_REF_SYNCCPU_WRITE, &existed);
		if (existed || ret != 0)
			ttm_bo_synccpu_write_release(bo);
		ttm_base_object_unref(&base);
		break;
	case TTM_PL_SYNCCPU_OP_RELEASE:
		ret = ttm_ref_object_base_unref(tfile, arg->handle,
						TTM_REF_SYNCCPU_WRITE);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
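
/*
 * ttm_pl_setstatus_ioctl - Change the placement flags of a buffer object:
 * check the requested set/clear masks for privileged flags, then
 * revalidate the bo into the resulting placement.
 */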
int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
			   struct ttm_lock *lock, void *data)
{
	union ttm_pl_setstatus_arg *arg = data;
	struct ttm_pl_setstatus_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_bo_device *bdev;
	struct ttm_placement placement = default_placement;
	uint32_t flags[2];
	int ret;

	bo = ttm_buffer_object_lookup(tfile, req->handle);
	if (unlikely(bo == NULL)) {
		printk(KERN_ERR
		       "Could not find buffer object for setstatus.\n");
		return -EINVAL;
	}

	bdev = bo->bdev;

	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err1;

	ret = ttm_bo_wait_cpu(bo, false);
	if (unlikely(ret != 0))
		goto out_err2;

	flags[0] = req->set_placement;
	flags[1] = req->clr_placement;

	placement.num_placement = 2;
	placement.placement = flags;

	/* Review internal locking ? FIXMEAC */
	ret = psb_ttm_bo_check_placement(bo, &placement);
	if (unlikely(ret != 0))
		goto out_err2;

	placement.num_placement = 1;
	flags[0] = (req->set_placement | bo->mem.placement)
	    & ~req->clr_placement;

	ret = ttm_bo_validate(bo, &placement, true, false, false);
	if (unlikely(ret != 0))
		goto out_err2;

	ttm_pl_fill_rep(bo, rep);
out_err2:
	ttm_bo_unreserve(bo);
out_err1:
	ttm_read_unlock(lock);
out_err0:
	ttm_bo_unref(&bo);
	return ret;
}
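
/*
 * psb_ttm_bo_block_reservation - Take the bo reservation by hand, waiting
 * on the bo's event queue (optionally interruptibly or not at all) until
 * it becomes free.
 */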
static int psb_ttm_bo_block_reservation(struct ttm_buffer_object *bo,
					bool interruptible, bool no_wait)
{
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (no_wait)
			return -EBUSY;
		else if (interruptible) {
			ret = wait_event_interruptible(bo->event_queue,
					atomic_read(&bo->reserved) == 0);
			if (unlikely(ret != 0))
				return -ERESTART;
		} else {
			wait_event(bo->event_queue,
				   atomic_read(&bo->reserved) == 0);
		}
	}
	return 0;
}

static void psb_ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}
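
/*
 * ttm_pl_waitidle_ioctl - Wait for all GPU activity on a buffer object to
 * complete, honouring the lazy and no-block flags in the request.
 */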
int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
{
	struct ttm_pl_waitidle_arg *arg = data;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_buffer_object_lookup(tfile, arg->handle);
	if (unlikely(bo == NULL)) {
		printk(KERN_ERR "Could not find buffer object for waitidle.\n");
		return -EINVAL;
	}

	ret =
	    psb_ttm_bo_block_reservation(bo, true,
					 arg->mode &
					 TTM_PL_WAITIDLE_MODE_NO_BLOCK);
	if (unlikely(ret != 0))
		goto out;
	ret = ttm_bo_wait(bo,
			  arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
			  true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
	psb_ttm_bo_unblock_reservation(bo);
out:
	ttm_bo_unref(&bo);
	return ret;
}
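
/*
 * ttm_pl_verify_access - mmap access check: only user buffer objects that
 * are shareable or owned by the calling file may be mapped.
 */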
int ttm_pl_verify_access(struct ttm_buffer_object *bo,
			 struct ttm_object_file *tfile)
{
	struct ttm_bo_user_object *ubo;

	/*
	 * Check bo subclass.
	 */

	if (unlikely(bo->destroy != &ttm_bo_user_destroy))
		return -EPERM;

	ubo = container_of(bo, struct ttm_bo_user_object, bo);
	if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
		return 0;

	return -EPERM;
}