/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
				  uint32_t read_domains,
				  uint32_t write_domain);
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);

static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	mutex_lock(&dev->struct_mutex);

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
	    (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
		    args->gtt_end - args->gtt_start);

	dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	uint32_t handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;
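
	return 0;
}

/*
 * Minimal userspace sketch (illustrative only, error handling omitted),
 * assuming a DRM fd opened on the i915 device:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	// create.handle now names the object; note the kernel rounds
 *	// size up to a page multiple before allocating.
 */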
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	ssize_t read;
	loff_t offset;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
			args->size, &offset);
	if (read != args->size) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		if (read < 0)
			return read;
		else
			return -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}
/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = io_mapping_map_wc(mapping, page_base);
	if (vaddr == NULL)
		return -EFAULT;
	unwritten = __copy_from_user(vaddr + page_offset,
				     user_data, length);
	io_mapping_unmap(vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		    struct drm_i915_gem_pwrite *args,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
				       page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  In this case, use the
		 * non-atomic function
		 */
		if (ret) {
			ret = slow_user_write (dev_priv->mm.gtt_mapping,
					       page_base, page_offset,
					       user_data, page_length);
			if (ret)
				goto fail;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
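
/*
 * Worked example of the per-page split above (PAGE_SIZE == 4096): a write
 * of 16 bytes at GTT offset 0x1ffc straddles a page boundary, so the first
 * iteration copies 4 bytes (page_base 0x1000, page_offset 0xffc,
 * page_length 4) and the second copies the remaining 12 bytes at
 * page_base 0x2000, page_offset 0.
 */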
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file_priv)
{
	int ret;
	loff_t offset;
	ssize_t written;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	written = vfs_write(obj->filp,
			    (char __user *)(uintptr_t) args->data_ptr,
			    args->size, &offset);
	if (written != args->size) {
		mutex_unlock(&dev->struct_mutex);
		if (written < 0)
			return written;
		else
			return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->tiling_mode == I915_TILING_NONE &&
	    dev->gtt_total != 0)
		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
	else
		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);

	drm_gem_object_unreference(obj);

	return ret;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);
	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
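
/*
 * Usage sketch (illustrative): before writing through a GTT mapping,
 * userspace would call this ioctl with read_domains = write_domain =
 * I915_GEM_DOMAIN_GTT; before reading the object back with the CPU it
 * would use read_domains = I915_GEM_DOMAIN_CPU, write_domain = 0.
 */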
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	DRM_INFO("%s: sw_finish %d (%p %d)\n",
		 __func__, args->handle, obj, obj->size);
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
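
/*
 * Minimal userspace sketch (illustrative only), assuming `handle` and
 * `size` come from a prior create ioctl:
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = size,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
 *	ptr = (void *)(uintptr_t) mmap_arg.addr_ptr;
 */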
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	if (obj_priv->page_list == NULL)
		return;

	for (i = 0; i < page_count; i++)
		if (obj_priv->page_list[i] != NULL) {
			if (obj_priv->dirty)
				set_page_dirty(obj_priv->page_list[i]);
			mark_page_accessed(obj_priv->page_list[i]);
			page_cache_release(obj_priv->page_list[i]);
		}
	obj_priv->dirty = 0;

	drm_free(obj_priv->page_list,
		 page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj_priv->page_list = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	obj_priv->last_rendering_seqno = seqno;
}
static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
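
/*
 * Sketch of the list transitions implemented by the three helpers above
 * (inferred from their bodies and from i915_add_request()):
 *
 *	inactive_list --execbuffer-----------------------> active_list
 *	active_list ---retired, write_domain != 0--------> flushing_list
 *	active_list ---retired, write_domain == 0--------> inactive_list
 *	flushing_list --covering flush emitted-----------> active_list
 */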
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}
	}

	if (was_empty && !dev_priv->mm.suspended)
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	return seqno;
}
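
/*
 * Example of the zero-skip above: if next_gem_seqno is 0xffffffff, this
 * request gets seqno 0xffffffff, the post-increment wraps next_gem_seqno
 * to 0, and the extra bump moves it on to 1, so 0 stays reserved as the
 * "no seqno" value.
 */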
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this one, and we are done.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;

		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}
}
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
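
/*
 * Illustrative: the signed 32-bit difference keeps the comparison correct
 * across seqno wraparound, e.g.:
 *
 *	i915_seqno_passed(0x00000002, 0xfffffffe) == 1	(4 apart, post-wrap)
 *	i915_seqno_passed(0xfffffffe, 0x00000002) == 0
 */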
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
						     I915_GEM_DOMAIN_GTT)) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);

		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}
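
/*
 * Example (pre-965): invalidate_domains = RENDER | SAMPLER with
 * flush_domains = RENDER yields cmd = MI_FLUSH | MI_READ_FLUSH, i.e. the
 * write flush is enabled (MI_NO_WRITE_FLUSH cleared for RENDER) and the
 * sampler cache is invalidated via MI_READ_FLUSH.
 */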
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret = 0;

	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);

	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	BUG_ON(obj_priv->active);

	i915_gem_object_free_page_list(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);
			DRM_INFO("%s: evicting %p\n", __func__, obj);
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				break;

			/* if waiting caused an object to become inactive,
			 * then loop around and wait for it. Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);

			obj = NULL;
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d "
			  "flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}

	return ret;
}
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->page_list)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->page_list != NULL);
	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
					 DRM_MEM_DRIVER);
	if (obj_priv->page_list == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_free_page_list(obj);
			return ret;
		}
		obj_priv->page_list[i] = page;
	}
	return 0;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (alignment == 0)
		alignment = PAGE_SIZE;
	if (alignment & (PAGE_SIZE - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		DRM_INFO("%s: GTT full, evicting something\n", __func__);

		if (list_empty(&dev_priv->mm.inactive_list) &&
		    list_empty(&dev_priv->mm.flushing_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->page_list,
					       page_count,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_free_page_list(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

	return 0;
}
static void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->page_list == NULL)
		return;

	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t seqno;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	obj->write_domain = 0;
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	obj->write_domain = 0;
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	return 0;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	struct drm_device *dev = obj->dev;
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);
		drm_agp_chipset_flush(dev);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}
/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invaliding though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped to GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
				  uint32_t read_domains,
				  uint32_t write_domain)
{
	struct drm_device		*dev = obj->dev;
	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
	uint32_t			invalidate_domains = 0;
	uint32_t			flush_domains = 0;

	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);

	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, read_domains,
		 obj->write_domain, write_domain);

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (write_domain == 0)
		read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain && obj->write_domain != read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |= read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
		i915_gem_clflush_object(obj);
	}

	if ((write_domain | flush_domains) != 0)
		obj->write_domain = write_domain;
	obj->read_domains = read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
}
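
/*
 * Worked example (Case 3, step 3 from the comment above): an object in
 * (CPU, CPU) passed in with (RENDER, 0).  write_domain == 0, so
 * read_domains becomes CPU | RENDER; the old CPU write domain doesn't
 * match, so flush_domains |= CPU and invalidate_domains picks up RENDER;
 * the CPU bit in flush_domains triggers the clflush here, and the
 * MI_FLUSH/chipset flush are emitted later by i915_gem_dev_set_domain().
 */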
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->page_list + i, 1);
		}
		drm_agp_chipset_flush(dev);
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
		 DRM_MEM_DRIVER);
	obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
						      DRM_MEM_DRIVER);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->page_list + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	return 0;
}
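
/*
 * Example: a pread of 100 bytes at offset 0x1000 from a 16KB object walks
 * only i == 1 in the loop above, clflushing and marking page_cpu_valid[1]
 * while pages 0, 2 and 3 stay untouched until they are actually read.
 */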
/**
 * Once all of the objects have been set in the proper domain,
 * perform the necessary flush and invalidate operations.
 *
 * Returns the write domains flushed, for use in flush tracking.
 */
static uint32_t
i915_gem_dev_set_domain(struct drm_device *dev)
{
	uint32_t flush_domains = dev->flush_domains;

	/*
	 * Now that all the buffers are synced to the proper domains,
	 * flush and invalidate the collected domains
	 */
	if (dev->invalidate_domains | dev->flush_domains) {
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		dev->invalidate_domains = 0;
		dev->flush_domains = 0;
	}

	return flush_domains;
}
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_relocation_entry __user *relocs;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	relocs = (struct drm_i915_gem_relocation_entry __user *)
		 (uintptr_t) entry->relocs_ptr;
	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return ret;
		}

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.read_domains,
				  reloc.write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc.offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc.delta;

		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc.offset,
			 readl(reloc_entry), reloc_val);
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* Write the updated presumed offset for this entry back out
		 * to the user.
		 */
		reloc.presumed_offset = target_obj_priv->gtt_offset;
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EFAULT;
		}

		drm_gem_object_unreference(target_obj);
	}

	i915_gem_dump_object(obj, 128, __func__, ~0);
	return 0;
}
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
					     (uintptr_t) exec->cliprects_ptr;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	if (!exec_start)
		return -EINVAL;

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	uint32_t seqno;

	mutex_lock(&dev->struct_mutex);
	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
	i915_file_priv->mm.last_gem_throttle_seqno =
		i915_file_priv->mm.last_gem_seqno;
	if (seqno)
		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	int ret, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains;

	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
			       DRM_MEM_DRIVER);
	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
				 DRM_MEM_DRIVER);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		return -EIO;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EBUSY;
	}

	/* Zero the global flush/invalidate flags. These
	 * will be modified as each object is bound to the
	 * gtt
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	/* Look up object handles and perform the relocations */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}

		object_list[i]->pending_read_domains = 0;
		object_list[i]->pending_write_domain = 0;
		ret = i915_gem_object_pin_and_relocate(object_list[i],
						       file_priv,
						       &exec_list[i]);
		if (ret) {
			DRM_ERROR("object bind and relocate failed %d\n", ret);
			goto err;
		}
		pinned = i + 1;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;

	i915_verify_inactive(dev, __FILE__, __LINE__);

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flushing */
		i915_gem_object_set_to_gpu_domain(obj,
						  obj->pending_read_domains,
						  obj->pending_write_domain);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Flush/invalidate caches and chipset buffer */
	flush_domains = i915_gem_dev_set_domain(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}

	exec_offset = exec_list[args->buffer_count - 1].offset;

	i915_gem_dump_object(object_list[args->buffer_count - 1],
			     args->batch_len,
			     __func__,
			     ~0);

	(void)i915_add_request(dev, flush_domains);

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	BUG_ON(seqno == 0);
	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
	}
	i915_dump_lru(dev, __func__);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Copy the new buffer offsets back to the user's exec list. */
	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
			   (uintptr_t) args->buffers_ptr,
			   exec_list,
			   sizeof(*exec_list) * args->buffer_count);
	if (ret)
		DRM_ERROR("failed to copy %d exec entries "
			  "back to user (%d)\n",
			  args->buffer_count, ret);
err:
	if (object_list != NULL) {
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);

		for (i = 0; i < args->buffer_count; i++)
			drm_gem_object_unreference(object_list[i]);
	}
	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
		 DRM_MEM_DRIVER);

	return ret;
}
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			DRM_ERROR("Failure to bind: %d", ret);
			return ret;
		}
	}

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, args->alignment);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	i915_gem_object_unpin(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
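/**
 * i915_gem_busy_ioctl - report whether an object is still in use by the GPU
 *
 * Returns the object's "active" flag in args->busy, letting userspace poll
 * for completion without blocking in the kernel.
 */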
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	args->busy = obj_priv->active;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
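/**
 * i915_gem_init_object - set up driver-private state for a new GEM object
 *
 * Allocates the drm_i915_gem_object that hangs off obj->driver_private and
 * starts the object out in the CPU domain, since its freshly allocated
 * pages were just zeroed by the CPU.
 */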
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	INIT_LIST_HEAD(&obj_priv->list);
	return 0;
}
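/**
 * i915_gem_free_object - tear down an object at last unreference
 *
 * Forces any remaining pins off, unbinds the object from the GTT, and
 * frees the driver-private tracking structures.
 */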
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	i915_gem_object_unbind(obj);

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}
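/**
 * i915_gem_idle - drain the GPU and unbind everything
 *
 * Flushes all GPU write domains, emits and waits for a final request
 * (declaring the hardware wedged if the seqno stops advancing for roughly
 * a second), then strips the GPU domains from anything left on the active
 * and flushing lists and evicts the inactive list before tearing down the
 * ring.
 */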
static int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
					I915_GEM_DOMAIN_GTT));

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive.  If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there.  So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
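/**
 * i915_gem_init_hws - allocate and map the hardware status page
 *
 * On chipsets that take a GTT address for the status page (see
 * I915_NEED_GFX_HWS()), allocates a 4096-byte GEM object, pins it, kmaps
 * its first page, and points HWS_PGA at it.
 */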
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
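/**
 * i915_gem_init_ringbuffer - allocate, map, and start the render ring
 *
 * Pins a 128KB GEM object for the ring, ioremaps it write-combined, then
 * programs the PRB0 start/head/tail/control registers, working around the
 * G45 failure to reset the head pointer during initialization.
 */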
static int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	dev_priv->ring.Size = obj->size;
	dev_priv->ring.tail_mask = obj->size - 1;

	dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
	dev_priv->ring.map.size = obj->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->ring.ring_obj = obj;
	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	i915_kernel_lost_context(dev);

	return 0;
}
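/**
 * i915_gem_cleanup_ringbuffer - undo i915_gem_init_ringbuffer()
 *
 * Releases the ring mapping and object, then the status page if one was
 * allocated, parking HWS_PGA at a high scratch address.
 */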
static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	if (dev_priv->hws_obj != NULL) {
		struct drm_gem_object *obj = dev_priv->hws_obj;
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		kunmap(obj_priv->page_list[0]);
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		dev_priv->hws_obj = NULL;
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		dev_priv->hw_status_page = NULL;

		/* Write high address into HWS_PGA when disabling. */
		I915_WRITE(HWS_PGA, 0x1ffff000);
	}
}
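/**
 * i915_gem_entervt_ioctl - bring up GEM when the DRM master takes the VT
 *
 * Re-initializes the ring (optimistically clearing any wedged state), maps
 * the aperture write-combined, and re-enables execbuf and interrupts.
 */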
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0)
		return ret;

	dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
							dev->agp->agp_info.aper_size
							* 1024 * 1024);

	mutex_lock(&dev->struct_mutex);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	dev_priv->mm.suspended = 0;
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	return ret;
}
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
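/**
 * i915_gem_load - one-time GEM setup at driver load
 *
 * Initializes the memory-manager lists, the retire work handler, and the
 * seqno counter, then probes the chipset's bit-6 swizzling mode.
 */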
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	i915_gem_detect_bit_6_swizzle(dev);
}