/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
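/* Editorial note, not part of the original file: everything outside the CPU
 * and GTT domains is treated as a "GPU domain"; i915_gem_idle() below uses
 * this mask to strip GPU write domains off objects once the hardware is
 * declared wedged.
 */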
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
			   uint32_t read_domains,
			   uint32_t write_domain);
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
				 uint64_t offset,
				 uint64_t size,
				 uint32_t read_domains,
				 uint32_t write_domain);
static int
i915_gem_set_domain(struct drm_gem_object *obj,
		    struct drm_file *file_priv,
		    uint32_t read_domains,
		    uint32_t write_domain);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	mutex_lock(&dev->struct_mutex);

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
	    (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
		    args->gtt_end - args->gtt_start);

	dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int handle, ret;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	ssize_t read;
	loff_t offset;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
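	/* Editorial note, not in the original: because the first two tests
	 * below bound args->offset and args->size by obj->size individually,
	 * the sum in the third test cannot wrap a 64-bit value; the XXX
	 * above presumably asks for review of exactly this kind of
	 * reasoning.
	 */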
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
					       I915_GEM_DOMAIN_CPU, 0);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
			args->size, &offset);
	if (read != args->size) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		if (read < 0)
			return read;
		else
			return -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = io_mapping_map_wc(mapping, page_base);
	if (vaddr == NULL)
		return -EFAULT;
	unwritten = __copy_from_user(vaddr + page_offset,
				     user_data, length);
	io_mapping_unmap(vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		    struct drm_i915_gem_pwrite *args,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_set_domain(obj, file_priv,
				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;
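		/* Editorial example, not in the original: with PAGE_SIZE of
		 * 4096, offset = 0x1234 and remain = 0x2000 give page_base =
		 * 0x1000, page_offset = 0x234 and page_length = 4096 - 0x234
		 * = 0xdcc, so each copy below stays within one page.
		 */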
		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
				       page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. In this case, use the
		 * non-atomic function
		 */
		if (ret) {
			ret = slow_user_write (dev_priv->mm.gtt_mapping,
					       page_base, page_offset,
					       user_data, page_length);
			if (ret)
				goto fail;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file_priv)
{
	int ret;
	loff_t offset;
	ssize_t written;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_set_domain(obj, file_priv,
				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	offset = args->offset;

	written = vfs_write(obj->filp,
			    (char __user *)(uintptr_t) args->data_ptr,
			    args->size, &offset);
	if (written != args->size) {
		mutex_unlock(&dev->struct_mutex);
		if (written < 0)
			return written;
		else
			return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->tiling_mode == I915_TILING_NONE &&
	    dev->gtt_total != 0)
		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
	else
		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}
/**
 * Called when user space prepares to use an object
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
		 obj, obj->size, args->read_domains, args->write_domain);
#endif
	ret = i915_gem_set_domain(obj, file_priv,
				  args->read_domains, args->write_domain);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %d)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
		i915_gem_clflush_object(obj);
		drm_agp_chipset_flush(dev);
	}
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	if (obj_priv->page_list == NULL)
		return;

	for (i = 0; i < page_count; i++)
		if (obj_priv->page_list[i] != NULL) {
			if (obj_priv->dirty)
				set_page_dirty(obj_priv->page_list[i]);
			mark_page_accessed(obj_priv->page_list[i]);
			page_cache_release(obj_priv->page_list[i]);
		}
	obj_priv->dirty = 0;

	drm_free(obj_priv->page_list,
		 page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj_priv->page_list = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}
	}

	if (was_empty && !dev_priv->mm.suspended)
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	return seqno;
}
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this one.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;
#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}
}
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
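/* Editorial example, not in the original: the signed subtraction makes the
 * comparison robust to 32-bit wraparound as long as the two sequence
 * numbers are less than 2^31 apart.  E.g. i915_seqno_passed(2, 0xfffffffe)
 * is true: (int32_t)(2 - 0xfffffffe) == 4, which is >= 0.
 */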
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}

void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
						     I915_GEM_DOMAIN_GTT)) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* If there are writes queued to the buffer, flush and
	 * create a new seqno to wait for.
	 */
	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
		uint32_t seqno, write_domain = obj->write_domain;
#if WATCH_BUF
		DRM_INFO("%s: flushing object %p from write domain %08x\n",
			  __func__, obj, write_domain);
#endif
		i915_gem_flush(dev, 0, write_domain);

		seqno = i915_add_request(dev, write_domain);
		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
#endif
	}

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			  __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* Wait for any rendering to complete
	 */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret) {
		DRM_ERROR("wait_rendering failed: %d\n", ret);
		return ret;
	}

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
					 I915_GEM_DOMAIN_CPU);
	if (ret) {
		DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	BUG_ON(obj_priv->active);

	i915_gem_object_free_page_list(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);
#if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				break;

			/* if waiting caused an object to become inactive,
			 * then loop around and wait for it. Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);

			obj = NULL;
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d "
			  "flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}
	return ret;
}
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->page_list)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->page_list != NULL);
	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
					 DRM_MEM_DRIVER);
	if (obj_priv->page_list == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_free_page_list(obj);
			return ret;
		}
		obj_priv->page_list[i] = page;
	}
	return 0;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (alignment == 0)
		alignment = PAGE_SIZE;
	if (alignment & (PAGE_SIZE - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		if (list_empty(&dev_priv->mm.inactive_list) &&
		    list_empty(&dev_priv->mm.flushing_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	ret = i915_gem_object_get_page_list(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->page_list,
					       page_count,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_free_page_list(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

	return 0;
}

void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->page_list == NULL)
		return;

	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invaliding though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
			   uint32_t read_domains,
			   uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;
	int ret;

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, read_domains,
		 obj->write_domain, write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (write_domain == 0)
		read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain && obj->write_domain != read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |= read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		/*
		 * If we're invaliding the CPU cache and flushing a GPU cache,
		 * then pause for rendering so that the GPU caches will be
		 * flushed before the cpu cache is invalidated
		 */
		if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
		    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
				       I915_GEM_DOMAIN_GTT))) {
			ret = i915_gem_object_wait_rendering(obj);
			if (ret)
				return ret;
		}
		i915_gem_clflush_object(obj);
	}

	if ((write_domain | flush_domains) != 0)
		obj->write_domain = write_domain;

	/* If we're invalidating the CPU domain, clear the per-page CPU
	 * domain list as well.
	 */
	if (obj_priv->page_cpu_valid != NULL &&
	    (write_domain != 0 ||
	     read_domains & I915_GEM_DOMAIN_CPU)) {
		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
			 DRM_MEM_DRIVER);
		obj_priv->page_cpu_valid = NULL;
	}
	obj->read_domains = read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif
	return 0;
}
/**
 * Set the read/write domain on a range of the object.
 *
 * Currently only implemented for CPU reads, otherwise drops to normal
 * i915_gem_object_set_domain().
 */
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
				 uint64_t offset,
				 uint64_t size,
				 uint32_t read_domains,
				 uint32_t write_domain)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret, i;

	if (obj->read_domains & I915_GEM_DOMAIN_CPU)
		return 0;

	if (read_domains != I915_GEM_DOMAIN_CPU ||
	    write_domain != 0)
		return i915_gem_object_set_domain(obj,
						  read_domains, write_domain);

	/* Wait on any GPU rendering to the object to be flushed. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret)
		return ret;

	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
						      DRM_MEM_DRIVER);
	}

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->page_list + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	return 0;
}
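/* Editorial example, not in the original: the loop above clflushes every
 * page that the byte range [offset, offset + size) touches.  With PAGE_SIZE
 * of 4096, offset = 0x1800 and size = 0x1000 give i from 0x1800 / 0x1000 = 1
 * through (0x27ff / 0x1000) = 2, i.e. pages 1 and 2.
 */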
/**
 * Once all of the objects have been set in the proper domain,
 * perform the necessary flush and invalidate operations.
 *
 * Returns the write domains flushed, for use in flush tracking.
 */
static uint32_t
i915_gem_dev_set_domain(struct drm_device *dev)
{
	uint32_t flush_domains = dev->flush_domains;

	/*
	 * Now that all the buffers are synced to the proper domains,
	 * flush and invalidate the collected domains
	 */
	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		dev->invalidate_domains = 0;
		dev->flush_domains = 0;
	}

	return flush_domains;
}

/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_relocation_entry __user *relocs;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	relocs = (struct drm_i915_gem_relocation_entry __user *)
		 (uintptr_t) entry->relocs_ptr;
	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return ret;
		}

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);
#endif

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		/* Now that we're going to actually write some data in,
		 * make sure that any rendering using this buffer's contents
		 * is completed.
		 */
		i915_gem_object_wait_rendering(obj);

		/* As we're writing through the gtt, flush
		 * any CPU writes before we write the relocations
		 */
		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			drm_agp_chipset_flush(dev);
			obj->write_domain = 0;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc.offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc.delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc.offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* Write the updated presumed offset for this entry back out
		 * to the user.
		 */
		reloc.presumed_offset = target_obj_priv->gtt_offset;
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EFAULT;
		}

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}

/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
					     (uintptr_t) exec->cliprects_ptr;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	if (!exec_start)
		return -EINVAL;

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	uint32_t seqno;

	mutex_lock(&dev->struct_mutex);
	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
	i915_file_priv->mm.last_gem_throttle_seqno =
		i915_file_priv->mm.last_gem_seqno;
	if (seqno)
		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	int ret, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
			       DRM_MEM_DRIVER);
	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
				 DRM_MEM_DRIVER);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		return -EIO;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EBUSY;
	}

	/* Zero the global flush/invalidate flags. These
	 * will be modified as each object is bound to the
	 * gtt
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	/* Look up object handles and perform the relocations */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				   exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}

		object_list[i]->pending_read_domains = 0;
		object_list[i]->pending_write_domain = 0;
		ret = i915_gem_object_pin_and_relocate(object_list[i],
						       file_priv,
						       &exec_list[i]);
		if (ret) {
			DRM_ERROR("object bind and relocate failed %d\n", ret);
			goto err;
		}
		pinned = i + 1;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;

	i915_verify_inactive(dev, __FILE__, __LINE__);

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* make sure all previous memory operations have passed */
		ret = i915_gem_object_set_domain(obj,
						 obj->pending_read_domains,
						 obj->pending_write_domain);
		if (ret)
			goto err;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Flush/invalidate caches and chipset buffer */
	flush_domains = i915_gem_dev_set_domain(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

	exec_offset = exec_list[args->buffer_count - 1].offset;

#if WATCH_EXEC
	i915_gem_dump_object(object_list[args->buffer_count - 1],
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	(void)i915_add_request(dev, flush_domains);

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	BUG_ON(seqno == 0);
	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Copy the new buffer offsets back to the user's exec list. */
	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
			   (uintptr_t) args->buffers_ptr,
			   exec_list,
			   sizeof(*exec_list) * args->buffer_count);
	if (ret)
		DRM_ERROR("failed to copy %d exec entries "
			  "back to user (%d)\n",
			  args->buffer_count, ret);
err:
	if (object_list != NULL) {
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);

		for (i = 0; i < args->buffer_count; i++)
			drm_gem_object_unreference(object_list[i]);
	}
	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
		 DRM_MEM_DRIVER);

	return ret;
}
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			DRM_ERROR("Failure to bind: %d", ret);
			return ret;
		}
	}

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}

void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, args->alignment);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
		i915_gem_clflush_object(obj);
		drm_agp_chipset_flush(dev);
		obj->write_domain = 0;
	}
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	i915_gem_object_unpin(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	args->busy = obj_priv->active;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	INIT_LIST_HEAD(&obj_priv->list);
	return 0;
}

void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	i915_gem_object_unbind(obj);

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}

static int
i915_gem_set_domain(struct drm_gem_object *obj,
		    struct drm_file *file_priv,
		    uint32_t read_domains,
		    uint32_t write_domain)
{
	struct drm_device *dev = obj->dev;
	int ret;
	uint32_t flush_domains;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
	if (ret)
		return ret;
	flush_domains = i915_gem_dev_set_domain(obj->dev);

	if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
		(void) i915_add_request(dev, flush_domains);

	return 0;
}

/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}
static int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
					I915_GEM_DOMAIN_GTT));

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive.  If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there.  So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
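
/**
 * Sets up the hardware status page for chips that fetch it from the GTT
 * rather than from a physical address: allocates a cached 4KB object,
 * pins it, kmaps its first page for CPU access, and programs the GTT
 * offset into HWS_PGA.
 */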
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		/* drop the pin taken above before the final unreference */
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
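
/**
 * Allocates, pins, and maps the 128KB ring buffer, then brings the ring
 * up: stop it, program the start address, make sure the head reset to
 * zero (G45 seems to need the head forced to zero by hand), and enable
 * the ring through PRB0_CTL.
 */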
static int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	dev_priv->ring.Size = obj->size;
	dev_priv->ring.tail_mask = obj->size - 1;

	dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
	dev_priv->ring.map.size = obj->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->ring.ring_obj = obj;
	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	i915_kernel_lost_context(dev);

	return 0;
}
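
/**
 * Inverse of i915_gem_init_ringbuffer(): unmaps, unpins, and releases
 * the ring object, then the status page, parking HWS_PGA at a high
 * scratch address while the status page is disabled.
 */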
static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	if (dev_priv->hws_obj != NULL) {
		struct drm_gem_object *obj = dev_priv->hws_obj;
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		kunmap(obj_priv->page_list[0]);
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		dev_priv->hws_obj = NULL;
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		dev_priv->hw_status_page = NULL;

		/* Write high address into HWS_PGA when disabling. */
		I915_WRITE(HWS_PGA, 0x1ffff000);
	}
}
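
/**
 * Called via ioctl when the X server takes the VT: brings the ringbuffer
 * back up, creates the write-combining mapping of the aperture, and
 * clears mm.suspended so execbuffers can run.  All object lists should
 * be empty at this point, hence the BUG_ONs.
 */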
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0)
		return ret;

	dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
							dev->agp->agp_info.aper_size
							* 1024 * 1024);

	mutex_lock(&dev->struct_mutex);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	dev_priv->mm.suspended = 0;
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}
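
/**
 * Counterpart of i915_gem_entervt_ioctl(): idles the GPU (which sets
 * mm.suspended again), uninstalls the IRQ handler, and frees the
 * aperture mapping.
 */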
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	return ret;
}
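
/**
 * Driver lastclose hook: best-effort idle of the hardware.  Failure is
 * only logged, as there is nothing left to unwind at this point.
 */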
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
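
/**
 * One-time GEM initialization at driver load: sets up the active,
 * flushing, inactive, and request lists, the retire work handler, the
 * initial seqno, and detects the bit-6 swizzling mode used for tiling.
 */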
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	i915_gem_detect_bit_6_swizzle(dev);
}
);