/**************************************************************************
 *
 * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include "psb_ttm_fence_api.h"
#include "psb_ttm_fence_driver.h"
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drmP.h>
/*
 * Simple implementation for now.
 */

static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
{
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);

	printk(KERN_ERR "GPU lockup detected on engine %u "
	       "fence type 0x%08x\n",
	       (unsigned int)fence->fence_class, (unsigned int)mask);
	/*
	 * Give engines some time to idle?
	 */

	write_lock(&fc->lock);
	ttm_fence_handler(fence->fdev, fence->fence_class,
			  fence->sequence, mask, -EBUSY);
	write_unlock(&fc->lock);
}
/*
 * Convenience function to be called by fence::wait methods that
 * need polling.
 */

int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
			   bool interruptible, uint32_t mask)
{
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
	uint32_t count = 0;
	int ret;
	unsigned long end_jiffies = fence->timeout_jiffies;

	DECLARE_WAITQUEUE(entry, current);
	add_wait_queue(&fc->fence_queue, &entry);

	ret = 0;

	for (;;) {
		__set_current_state((interruptible) ?
				    TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (ttm_fence_object_signaled(fence, mask))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			if (driver->lockup)
				driver->lockup(fence, mask);
			else
				ttm_fence_lockup(fence, mask);
			continue;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTART;
			break;
		}
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->fence_queue, &entry);
	return ret;
}
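
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * hardware cannot raise completion interrupts could implement its
 * ttm_fence_driver::wait hook by delegating to the polling helper above.
 * The function name "my_fence_wait" is hypothetical.
 *
 *	static int my_fence_wait(struct ttm_fence_object *fence, bool lazy,
 *				 bool interruptible, uint32_t mask)
 *	{
 *		return ttm_fence_wait_polling(fence, lazy, interruptible,
 *					      mask);
 *	}
 */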
/*
 * Typically called by the IRQ handler.
 */

void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
		       uint32_t sequence, uint32_t type, uint32_t error)
{
	int wake = 0;
	uint32_t diff;
	uint32_t relevant_type;
	uint32_t new_type;
	struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
	const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
	struct list_head *head;
	struct ttm_fence_object *fence, *next;
	bool found = false;

	if (list_empty(&fc->ring))
		return;

	list_for_each_entry(fence, &fc->ring, ring) {
		diff = (sequence - fence->sequence) & fc->sequence_mask;
		if (diff > fc->wrap_diff) {
			found = true;
			break;
		}
	}

	fc->waiting_types &= ~type;
	head = (found) ? &fence->ring : &fc->ring;

	list_for_each_entry_safe_reverse(fence, next, head, ring) {
		if (&fence->ring == &fc->ring)
			break;

		DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
			  (unsigned long)fence, fence->sequence,
			  fence->fence_type);

		if (error) {
			fence->info.error = error;
			fence->info.signaled_types = fence->fence_type;
			list_del_init(&fence->ring);
			wake = 1;
			break;
		}

		relevant_type = type & fence->fence_type;
		new_type = (fence->info.signaled_types | relevant_type) ^
		    fence->info.signaled_types;

		if (new_type) {
			fence->info.signaled_types |= new_type;
			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
				  (unsigned long)fence,
				  fence->info.signaled_types);

			if (unlikely(driver->signaled))
				driver->signaled(fence);

			if (driver->needed_flush)
				fc->pending_flush |=
				    driver->needed_flush(fence);

			if (new_type & fence->waiting_types)
				wake = 1;
		}

		fc->waiting_types |=
		    fence->waiting_types & ~fence->info.signaled_types;

		if (!(fence->fence_type & ~fence->info.signaled_types)) {
			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
				  (unsigned long)fence);
			list_del_init(&fence->ring);
		}
	}

	/*
	 * Reinstate lost waiting types.
	 */

	if ((fc->waiting_types & type) != type) {
		head = head->prev;
		list_for_each_entry(fence, head, ring) {
			if (&fence->ring == &fc->ring)
				break;
			diff = (fc->highest_waiting_sequence -
				fence->sequence) & fc->sequence_mask;
			if (diff > fc->wrap_diff)
				break;

			fc->waiting_types |=
			    fence->waiting_types & ~fence->info.signaled_types;
		}
	}

	if (wake)
		wake_up_all(&fc->fence_queue);
}
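
/*
 * Example (illustrative sketch): how a hypothetical driver IRQ path might
 * report a completed sequence number for fence class 0. Note that
 * ttm_fence_handler() expects the class manager's lock to be write-held,
 * as in ttm_fence_lockup() above. MY_FENCE_TYPE_EXE and
 * my_read_fence_register() are invented for illustration.
 *
 *	static void my_driver_fence_irq(struct ttm_fence_device *fdev)
 *	{
 *		struct ttm_fence_class_manager *fc = &fdev->fence_class[0];
 *		uint32_t sequence = my_read_fence_register();
 *		unsigned long irq_flags;
 *
 *		write_lock_irqsave(&fc->lock, irq_flags);
 *		ttm_fence_handler(fdev, 0, sequence, MY_FENCE_TYPE_EXE, 0);
 *		write_unlock_irqrestore(&fc->lock, irq_flags);
 *	}
 */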
static void ttm_fence_unring(struct ttm_fence_object *fence)
{
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
	unsigned long irq_flags;

	write_lock_irqsave(&fc->lock, irq_flags);
	list_del_init(&fence->ring);
	write_unlock_irqrestore(&fc->lock, irq_flags);
}
bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
{
	unsigned long flags;
	bool signaled;
	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);

	mask &= fence->fence_type;
	read_lock_irqsave(&fc->lock, flags);
	signaled = (mask & fence->info.signaled_types) == mask;
	read_unlock_irqrestore(&fc->lock, flags);
	if (!signaled && driver->poll) {
		write_lock_irqsave(&fc->lock, flags);
		driver->poll(fence->fdev, fence->fence_class, mask);
		signaled = (mask & fence->info.signaled_types) == mask;
		write_unlock_irqrestore(&fc->lock, flags);
	}
	return signaled;
}
int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
{
	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
	unsigned long irq_flags;
	uint32_t saved_pending_flush;
	uint32_t diff;
	bool call_flush;

	if (type & ~fence->fence_type) {
		DRM_ERROR("Flush trying to extend fence type, "
			  "0x%x, 0x%x\n", type, fence->fence_type);
		return -EINVAL;
	}

	write_lock_irqsave(&fc->lock, irq_flags);
	fence->waiting_types |= type;
	fc->waiting_types |= fence->waiting_types;
	diff = (fence->sequence - fc->highest_waiting_sequence) &
	    fc->sequence_mask;

	if (diff < fc->wrap_diff)
		fc->highest_waiting_sequence = fence->sequence;

	/*
	 * fence->waiting_types has changed. Determine whether
	 * we need to initiate some kind of flush as a result of this.
	 */

	saved_pending_flush = fc->pending_flush;
	if (driver->needed_flush)
		fc->pending_flush |= driver->needed_flush(fence);

	if (driver->poll)
		driver->poll(fence->fdev, fence->fence_class,
			     fence->waiting_types);

	call_flush = (fc->pending_flush != 0);
	write_unlock_irqrestore(&fc->lock, irq_flags);

	if (call_flush && driver->flush)
		driver->flush(fence->fdev, fence->fence_class);

	return 0;
}
/*
 * Make sure old fence objects are signaled before their fence sequences are
 * wrapped around and reused.
 */

void ttm_fence_flush_old(struct ttm_fence_device *fdev,
			 uint32_t fence_class, uint32_t sequence)
{
	struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
	struct ttm_fence_object *fence;
	unsigned long irq_flags;
	const struct ttm_fence_driver *driver = fdev->driver;
	bool call_flush;
	uint32_t diff;

	write_lock_irqsave(&fc->lock, irq_flags);

	list_for_each_entry_reverse(fence, &fc->ring, ring) {
		diff = (sequence - fence->sequence) & fc->sequence_mask;
		if (diff <= fc->flush_diff)
			break;

		fence->waiting_types = fence->fence_type;
		fc->waiting_types |= fence->fence_type;

		if (driver->needed_flush)
			fc->pending_flush |= driver->needed_flush(fence);
	}

	if (driver->poll)
		driver->poll(fdev, fence_class, fc->waiting_types);

	call_flush = (fc->pending_flush != 0);
	write_unlock_irqrestore(&fc->lock, irq_flags);

	/*
	 * Use the fence_class argument here: if the ring was empty or was
	 * traversed to its end, the loop cursor above no longer points at a
	 * valid fence object.
	 */
	if (call_flush && driver->flush)
		driver->flush(fdev, fence_class);

	/*
	 * FIXME: Should we implement a wait here for really old fences?
	 */
}
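
/*
 * Example (illustrative): a hypothetical command-submission path could call
 * ttm_fence_flush_old() each time it emits a new sequence number, so that
 * fences lagging more than flush_diff behind are forced toward signaling
 * before the sequence space wraps:
 *
 *	ttm_fence_flush_old(fdev, fence_class, new_sequence);
 */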
int ttm_fence_object_wait(struct ttm_fence_object *fence,
			  bool lazy, bool interruptible, uint32_t mask)
{
	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
	int ret = 0;
	unsigned long timeout;
	unsigned long cur_jiffies;
	unsigned long to_jiffies;

	if (mask & ~fence->fence_type) {
		DRM_ERROR("Wait trying to extend fence type"
			  " 0x%08x 0x%08x\n", mask, fence->fence_type);
		BUG();
		return -EINVAL;
	}

	if (driver->wait)
		return driver->wait(fence, lazy, interruptible, mask);

	ttm_fence_object_flush(fence, mask);
retry:
	if (!driver->has_irq ||
	    driver->has_irq(fence->fdev, fence->fence_class, mask)) {

		cur_jiffies = jiffies;
		to_jiffies = fence->timeout_jiffies;

		timeout = (time_after(to_jiffies, cur_jiffies)) ?
		    to_jiffies - cur_jiffies : 1;

		if (interruptible)
			ret = wait_event_interruptible_timeout
			    (fc->fence_queue,
			     ttm_fence_object_signaled(fence, mask), timeout);
		else
			ret = wait_event_timeout
			    (fc->fence_queue,
			     ttm_fence_object_signaled(fence, mask), timeout);

		if (unlikely(ret == -ERESTARTSYS))
			return -ERESTART;

		if (unlikely(ret == 0)) {
			if (driver->lockup)
				driver->lockup(fence, mask);
			else
				ttm_fence_lockup(fence, mask);
			goto retry;
		}

		return 0;
	}

	return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
}
int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
			  uint32_t fence_class, uint32_t type)
{
	const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
	unsigned long flags;
	uint32_t sequence;
	unsigned long timeout;
	int ret;

	ttm_fence_unring(fence);
	ret = driver->emit(fence->fdev,
			   fence_class, fence_flags, &sequence, &timeout);
	if (ret)
		return ret;

	write_lock_irqsave(&fc->lock, flags);
	fence->fence_class = fence_class;
	fence->fence_type = type;
	fence->waiting_types = 0;
	fence->info.signaled_types = 0;
	fence->info.error = 0;
	fence->sequence = sequence;
	fence->timeout_jiffies = timeout;
	if (list_empty(&fc->ring))
		fc->highest_waiting_sequence = sequence - 1;
	list_add_tail(&fence->ring, &fc->ring);
	fc->latest_queued_sequence = sequence;
	write_unlock_irqrestore(&fc->lock, flags);
	return 0;
}
int ttm_fence_object_init(struct ttm_fence_device *fdev,
			  uint32_t fence_class,
			  uint32_t type,
			  uint32_t create_flags,
			  void (*destroy) (struct ttm_fence_object *),
			  struct ttm_fence_object *fence)
{
	int ret = 0;

	kref_init(&fence->kref);
	fence->fence_class = fence_class;
	fence->fence_type = type;
	fence->info.signaled_types = 0;
	fence->waiting_types = 0;
	fence->sequence = 0;
	fence->info.error = 0;
	fence->fdev = fdev;
	fence->destroy = destroy;
	INIT_LIST_HEAD(&fence->ring);
	atomic_inc(&fdev->count);

	if (create_flags & TTM_FENCE_FLAG_EMIT) {
		ret = ttm_fence_object_emit(fence, create_flags,
					    fence->fence_class, type);
	}

	return ret;
}
int ttm_fence_object_create(struct ttm_fence_device *fdev,
			    uint32_t fence_class,
			    uint32_t type,
			    uint32_t create_flags,
			    struct ttm_fence_object **c_fence)
{
	struct ttm_fence_object *fence;
	int ret;

	ret = ttm_mem_global_alloc(fdev->mem_glob,
				   sizeof(*fence),
				   false,
				   false);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR "Out of memory creating fence object\n");
		return ret;
	}

	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence) {
		printk(KERN_ERR "Out of memory creating fence object\n");
		ttm_mem_global_free(fdev->mem_glob, sizeof(*fence));
		return -ENOMEM;
	}

	ret = ttm_fence_object_init(fdev, fence_class, type,
				    create_flags, NULL, fence);
	if (ret) {
		ttm_fence_object_unref(&fence);
		return ret;
	}
	*c_fence = fence;

	return 0;
}
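
/*
 * Example usage (illustrative): create a fence on class 0, emit it
 * immediately, wait for it interruptibly, then drop the reference.
 * MY_FENCE_TYPE_EXE is a hypothetical driver-defined type bit.
 *
 *	struct ttm_fence_object *fence;
 *	int ret;
 *
 *	ret = ttm_fence_object_create(fdev, 0, MY_FENCE_TYPE_EXE,
 *				      TTM_FENCE_FLAG_EMIT, &fence);
 *	if (ret)
 *		return ret;
 *	ret = ttm_fence_object_wait(fence, false, true, MY_FENCE_TYPE_EXE);
 *	ttm_fence_object_unref(&fence);
 */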
static void ttm_fence_object_destroy(struct kref *kref)
{
	struct ttm_fence_object *fence =
	    container_of(kref, struct ttm_fence_object, kref);
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
	unsigned long irq_flags;

	write_lock_irqsave(&fc->lock, irq_flags);
	list_del_init(&fence->ring);
	write_unlock_irqrestore(&fc->lock, irq_flags);

	atomic_dec(&fence->fdev->count);
	if (fence->destroy)
		fence->destroy(fence);
	else {
		ttm_mem_global_free(fence->fdev->mem_glob,
				    sizeof(*fence));
		kfree(fence);
	}
}
void ttm_fence_device_release(struct ttm_fence_device *fdev)
{
	kfree(fdev->fence_class);
}
int
ttm_fence_device_init(int num_classes,
		      struct ttm_mem_global *mem_glob,
		      struct ttm_fence_device *fdev,
		      const struct ttm_fence_class_init *init,
		      bool replicate_init,
		      const struct ttm_fence_driver *driver)
{
	struct ttm_fence_class_manager *fc;
	const struct ttm_fence_class_init *fci;
	int i;

	fdev->mem_glob = mem_glob;
	fdev->fence_class = kzalloc(num_classes *
				    sizeof(*fdev->fence_class), GFP_KERNEL);

	if (unlikely(!fdev->fence_class))
		return -ENOMEM;

	fdev->num_classes = num_classes;
	atomic_set(&fdev->count, 0);
	fdev->driver = driver;

	for (i = 0; i < fdev->num_classes; ++i) {
		fc = &fdev->fence_class[i];
		fci = &init[(replicate_init) ? 0 : i];

		fc->wrap_diff = fci->wrap_diff;
		fc->flush_diff = fci->flush_diff;
		fc->sequence_mask = fci->sequence_mask;

		rwlock_init(&fc->lock);
		INIT_LIST_HEAD(&fc->ring);
		init_waitqueue_head(&fc->fence_queue);
	}

	return 0;
}
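
/*
 * Example (illustrative): initializing a device with one fence class over a
 * full 32-bit sequence space. The field values are plausible but
 * hypothetical, and "my_driver" stands in for a real ttm_fence_driver.
 *
 *	static const struct ttm_fence_class_init my_class_init = {
 *		.wrap_diff = (1 << 30),
 *		.flush_diff = (1 << 29),
 *		.sequence_mask = 0xFFFFFFFF,
 *	};
 *
 *	ret = ttm_fence_device_init(1, mem_glob, fdev, &my_class_init,
 *				    false, &my_driver);
 */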
struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
{
	struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
	struct ttm_fence_info tmp;
	unsigned long irq_flags;

	read_lock_irqsave(&fc->lock, irq_flags);
	tmp = fence->info;
	read_unlock_irqrestore(&fc->lock, irq_flags);

	return tmp;
}
void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
{
	struct ttm_fence_object *fence = *p_fence;

	*p_fence = NULL;
	(void)kref_put(&fence->kref, &ttm_fence_object_destroy);
}
/*
 * Placement / BO sync object glue.
 */

bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
	uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;

	return ttm_fence_object_signaled(fence, fence_types);
}

int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
			    bool lazy, bool interruptible)
{
	struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
	uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;

	return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
}

int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
	uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;

	return ttm_fence_object_flush(fence, fence_types);
}

void ttm_fence_sync_obj_unref(void **sync_obj)
{
	ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
}

void *ttm_fence_sync_obj_ref(void *sync_obj)
{
	return (void *)
	    ttm_fence_object_ref((struct ttm_fence_object *)sync_obj);
}
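
/*
 * Example (illustrative): the glue functions above are intended to be
 * plugged into the buffer-object driver's sync-object hooks, roughly as
 * below. The field names follow the TTM bo driver interface of this era;
 * treat this as a sketch rather than a guaranteed API.
 *
 *	bo_driver->sync_obj_signaled = ttm_fence_sync_obj_signaled;
 *	bo_driver->sync_obj_wait = ttm_fence_sync_obj_wait;
 *	bo_driver->sync_obj_flush = ttm_fence_sync_obj_flush;
 *	bo_driver->sync_obj_unref = ttm_fence_sync_obj_unref;
 *	bo_driver->sync_obj_ref = ttm_fence_sync_obj_ref;
 */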