/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include <linux/swap.h>

#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
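/* Every domain other than CPU and GTT is a GPU cache (render, sampler,
 * command, instruction, vertex) and has to be flushed or invalidated with
 * commands emitted to the ring rather than by the CPU.
 */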
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
                           uint32_t read_domains,
                           uint32_t write_domain);
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
                                 uint64_t offset,
                                 uint64_t size,
                                 uint32_t read_domains,
                                 uint32_t write_domain);
static int
i915_gem_set_domain(struct drm_gem_object *obj,
                    struct drm_file *file_priv,
                    uint32_t read_domains,
                    uint32_t write_domain);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev);
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_init *args = data;

        mutex_lock(&dev->struct_mutex);

        if (args->gtt_start >= args->gtt_end ||
            (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
            (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
                mutex_unlock(&dev->struct_mutex);

        drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
                    args->gtt_end - args->gtt_start);

        dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);

        mutex_unlock(&dev->struct_mutex);
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
        struct drm_i915_gem_object *obj_priv;

        if (!(dev->driver->driver_features & DRIVER_GEM))

        args->aper_size = dev->gtt_total;
        args->aper_available_size = args->aper_size;

        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
                if (obj_priv->pin_count > 0)
                        args->aper_available_size -= obj_priv->obj->size;
/**
 * Creates a new mm object and returns a handle to it.
 */
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;

        args->size = roundup(args->size, PAGE_SIZE);

        /* Allocate the new object */
        obj = drm_gem_object_alloc(dev, args->size);

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_handle_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        args->handle = handle;
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);

        obj_priv = obj->driver_private;

        /* Bounds check source.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
                                               I915_GEM_DOMAIN_CPU, 0);
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);

        offset = args->offset;

        read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
                        args->size, &offset);
        if (read != args->size) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
        io_mapping_unmap_atomic(vaddr_atomic);

/* Here's the write path which can sleep for
 */
slow_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
        unsigned long unwritten;

        vaddr = io_mapping_map_wc(mapping, page_base);

        unwritten = __copy_from_user(vaddr + page_offset,
        io_mapping_unmap(vaddr);
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                    struct drm_i915_gem_pwrite *args,
                    struct drm_file *file_priv)
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;

        user_data = (char __user *) (uintptr_t) args->data_ptr;

        if (!access_ok(VERIFY_READ, user_data, remain))

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
                mutex_unlock(&dev->struct_mutex);
        ret = i915_gem_set_domain(obj, file_priv,
                                  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

        obj_priv = obj->driver_private;
        offset = obj_priv->gtt_offset + args->offset;

        /* Operation in this page
         *
         * page_base = page offset within aperture
         * page_offset = offset within page
         * page_length = bytes to copy for this page
         */
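        /* A worked example with hypothetical values: for offset 0x12345 and
         * 4 KiB pages, page_base is 0x12000, page_offset is 0x345, and
         * page_length is the smaller of remain and PAGE_SIZE - 0x345.
         */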
        page_base = (offset & ~(PAGE_SIZE-1));
        page_offset = offset & (PAGE_SIZE-1);
        page_length = remain;
        if ((page_offset + remain) > PAGE_SIZE)
                page_length = PAGE_SIZE - page_offset;

        ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                              page_offset, user_data, page_length);

        /* If we get a fault while copying data, then (presumably) our
         * source page isn't available.  In this case, use the
         * non-atomic function
         */
                ret = slow_user_write(dev_priv->mm.gtt_mapping,
                                      page_base, page_offset,
                                      user_data, page_length);

        remain -= page_length;
        user_data += page_length;
        offset += page_length;

        i915_gem_object_unpin(obj);
        mutex_unlock(&dev->struct_mutex);
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file_priv)
        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_set_domain(obj, file_priv,
                                  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
                mutex_unlock(&dev->struct_mutex);

        offset = args->offset;

        written = vfs_write(obj->filp,
                            (char __user *)(uintptr_t) args->data_ptr,
                            args->size, &offset);
        if (written != args->size) {
                mutex_unlock(&dev->struct_mutex);

        mutex_unlock(&dev->struct_mutex);
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);

        obj_priv = obj->driver_private;

        /* Bounds check destination.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->tiling_mode == I915_TILING_NONE &&
                ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
                ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

                DRM_INFO("pwrite failed %d\n", ret);

        drm_gem_object_unreference(obj);
/**
 * Called when user space prepares to use an object
 */
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;

        if (!(dev->driver->driver_features & DRIVER_GEM))

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);

        mutex_lock(&dev->struct_mutex);

        DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
                 obj, obj->size, args->read_domains, args->write_domain);

        ret = i915_gem_set_domain(obj, file_priv,
                                  args->read_domains, args->write_domain);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
/**
 * Called when user space has done writes to this buffer
 */
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        if (!(dev->driver->driver_features & DRIVER_GEM))

        mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
                mutex_unlock(&dev->struct_mutex);

        DRM_INFO("%s: sw_finish %d (%p %d)\n",
                 __func__, args->handle, obj, obj->size);

        obj_priv = obj->driver_private;

        /* Pinned buffers may be scanout, so flush the cache */
        if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
                i915_gem_clflush_object(obj);
                drm_agp_chipset_flush(dev);

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 */
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;

        if (!(dev->driver->driver_features & DRIVER_GEM))

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);

        offset = args->offset;

        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
        up_write(&current->mm->mmap_sem);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR((void *)addr))

        args->addr_ptr = (uint64_t) addr;
i915_gem_object_free_page_list(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count = obj->size / PAGE_SIZE;

        if (obj_priv->page_list == NULL)

        for (i = 0; i < page_count; i++)
                if (obj_priv->page_list[i] != NULL) {
                        set_page_dirty(obj_priv->page_list[i]);
                        mark_page_accessed(obj_priv->page_list[i]);
                        page_cache_release(obj_priv->page_list[i]);

        drm_free(obj_priv->page_list,
                 page_count * sizeof(struct page *),
        obj_priv->page_list = NULL;
i915_gem_object_move_to_active(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
                drm_gem_object_reference(obj);
                obj_priv->active = 1;

        /* Move from whatever list we were on to the tail of execution. */
        list_move_tail(&obj_priv->list,
                       &dev_priv->mm.active_list);
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
                list_del_init(&obj_priv->list);
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

        if (obj_priv->active) {
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);

        i915_verify_inactive(dev, __FILE__, __LINE__);
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
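/* The seqno is written into the hardware status page (slot
 * I915_GEM_HWS_INDEX) via MI_STORE_DWORD_INDEX and followed by
 * MI_USER_INTERRUPT, so the CPU can both read the latest completed seqno
 * from the status page and sleep until the interrupt fires.
 */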
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *request;

        request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);

        /* Grab the seqno we're going to make this request be, and bump the
         * next (skipping 0 so it can be the reserved no-seqno value).
         */
        seqno = dev_priv->mm.next_gem_seqno;
        dev_priv->mm.next_gem_seqno++;
        if (dev_priv->mm.next_gem_seqno == 0)
                dev_priv->mm.next_gem_seqno++;

        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(MI_USER_INTERRUPT);

        DRM_DEBUG("%d\n", seqno);

        request->seqno = seqno;
        request->emitted_jiffies = jiffies;
        request->flush_domains = flush_domains;
        was_empty = list_empty(&dev_priv->mm.request_list);
        list_add_tail(&request->list, &dev_priv->mm.request_list);

        if (was_empty && !dev_priv->mm.suspended)
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
i915_retire_commands(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;

        /* The sampler always gets flushed on i965 (sigh) */
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
        OUT_RING(0); /* noop */
        return flush_domains;
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
i915_gem_retire_request(struct drm_device *dev,
                        struct drm_i915_gem_request *request)
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Move any buffers on the active list that are no longer referenced
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
        while (!list_empty(&dev_priv->mm.active_list)) {
                struct drm_gem_object *obj;
                struct drm_i915_gem_object *obj_priv;

                obj_priv = list_first_entry(&dev_priv->mm.active_list,
                                            struct drm_i915_gem_object,

                /* If the seqno being retired doesn't match the oldest in the
                 * list, then the oldest in the list must still be newer than
                 */
                if (obj_priv->last_rendering_seqno != request->seqno)

                DRM_INFO("%s: retire %d moves to inactive list %p\n",
                         __func__, request->seqno, obj);

                if (obj->write_domain != 0) {
                        list_move_tail(&obj_priv->list,
                                       &dev_priv->mm.flushing_list);
                } else
                        i915_gem_object_move_to_inactive(obj);

        if (request->flush_domains != 0) {
                struct drm_i915_gem_object *obj_priv, *next;

                /* Clear the write domain and activity from any buffers
                 * that are just waiting for a flush matching the one retired.
                 */
                list_for_each_entry_safe(obj_priv, next,
                                         &dev_priv->mm.flushing_list, list) {
                        struct drm_gem_object *obj = obj_priv->obj;

                        if (obj->write_domain & request->flush_domains) {
                                obj->write_domain = 0;
                                i915_gem_object_move_to_inactive(obj);
/**
 * Returns true if seq1 is later than seq2.
 */
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
        return (int32_t)(seq1 - seq2) >= 0;
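/* Note on the comparison above: doing the subtraction in 32 bits and then
 * testing the sign makes the check safe across seqno wrap-around, as long
 * as the two values being compared are less than 2^31 apart.
 */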
i915_get_gem_seqno(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;

        return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
/**
 * This function clears the request list as sequence numbers are passed.
 */
i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;

        seqno = i915_get_gem_seqno(dev);

        while (!list_empty(&dev_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;
                uint32_t retiring_seqno;

                request = list_first_entry(&dev_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                retiring_seqno = request->seqno;

                if (i915_seqno_passed(seqno, retiring_seqno) ||
                    dev_priv->mm.wedged) {
                        i915_gem_retire_request(dev, request);

                        list_del(&request->list);
                        drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
i915_gem_retire_work_handler(struct work_struct *work)
        drm_i915_private_t *dev_priv;
        struct drm_device *dev;

        dev_priv = container_of(work, drm_i915_private_t,
                                mm.retire_work.work);

        mutex_lock(&dev->struct_mutex);
        i915_gem_retire_requests(dev);
        if (!dev_priv->mm.suspended &&
            !list_empty(&dev_priv->mm.request_list))
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
i915_wait_request(struct drm_device *dev, uint32_t seqno)
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
                dev_priv->mm.waiting_gem_seqno = seqno;
                i915_user_irq_get(dev);
                ret = wait_event_interruptible(dev_priv->irq_queue,
                                               i915_seqno_passed(i915_get_gem_seqno(dev),
                                               dev_priv->mm.wedged);
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;

        if (dev_priv->mm.wedged)

        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
                          __func__, ret, seqno, i915_get_gem_seqno(dev));

        /* Directly dispatch request retiring.  While we have the work queue
         * to handle this, the waiter on a request often wants an associated
         * buffer to have made it to the inactive list, and we would need
         * a separate wait queue to handle that.
         */
        i915_gem_retire_requests(dev);
i915_gem_flush(struct drm_device *dev,
               uint32_t invalidate_domains,
               uint32_t flush_domains)
        drm_i915_private_t *dev_priv = dev->dev_private;

        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                 invalidate_domains, flush_domains);

        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);

        if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
                                                     I915_GEM_DOMAIN_GTT)) {
                /*
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */
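                /* In short: start from MI_FLUSH | MI_NO_WRITE_FLUSH, clear
                 * MI_NO_WRITE_FLUSH whenever the render cache must actually
                 * be written back, and on pre-965 parts add the extra
                 * invalidation bits (e.g. MI_READ_FLUSH for the sampler)
                 * described in the comment above.
                 */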
                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
                if ((invalidate_domains|flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
                if (!IS_I965G(dev)) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
                         */
                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                                cmd |= MI_READ_FLUSH;
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)

                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);

                OUT_RING(0); /* noop */
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        /* If there are writes queued to the buffer, flush and
         * create a new seqno to wait for.
         */
        if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
                uint32_t write_domain = obj->write_domain;

                DRM_INFO("%s: flushing object %p from write domain %08x\n",
                         __func__, obj, write_domain);

                i915_gem_flush(dev, 0, write_domain);

                i915_gem_object_move_to_active(obj);
                obj_priv->last_rendering_seqno = i915_add_request(dev,
                BUG_ON(obj_priv->last_rendering_seqno == 0);

                DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);

        /* If there is rendering queued on the buffer being evicted, wait for
         */
        if (obj_priv->active) {
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                         __func__, obj, obj_priv->last_rendering_seqno);

                ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
/**
 * Unbinds an object from the GTT aperture.
 */
i915_gem_object_unbind(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
        DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);

        if (obj_priv->gtt_space == NULL)

        if (obj_priv->pin_count != 0) {
                DRM_ERROR("Attempting to unbind pinned buffer\n");

        /* Wait for any rendering to complete
         */
        ret = i915_gem_object_wait_rendering(obj);
                DRM_ERROR("wait_rendering failed: %d\n", ret);

        /* Move the object to the CPU domain to ensure that
         * any possible CPU writes while it's not in the GTT
         * are flushed when we go to remap it.  This will
         * also ensure that all pending GPU writes are finished
         */
        ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
                                         I915_GEM_DOMAIN_CPU);
                DRM_ERROR("set_domain failed: %d\n", ret);

        if (obj_priv->agp_mem != NULL) {
                drm_unbind_agp(obj_priv->agp_mem);
                drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
                obj_priv->agp_mem = NULL;

        BUG_ON(obj_priv->active);

        i915_gem_object_free_page_list(obj);

        if (obj_priv->gtt_space) {
                atomic_dec(&dev->gtt_count);
                atomic_sub(obj->size, &dev->gtt_memory);

                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;

        /* Remove ourselves from the LRU list if present. */
        if (!list_empty(&obj_priv->list))
                list_del_init(&obj_priv->list);
i915_gem_evict_something(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        /* If there's an inactive buffer available now, grab it
         */
        if (!list_empty(&dev_priv->mm.inactive_list)) {
                obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
                                            struct drm_i915_gem_object,
                obj = obj_priv->obj;
                BUG_ON(obj_priv->pin_count != 0);

                DRM_INFO("%s: evicting %p\n", __func__, obj);

                BUG_ON(obj_priv->active);

                /* Wait on the rendering and unbind the buffer. */
                ret = i915_gem_object_unbind(obj);

        /* If we didn't get anything, but the ring is still processing
         * things, wait for one of those things to finish and hopefully
         * leave us a buffer to evict.
         */
        if (!list_empty(&dev_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&dev_priv->mm.request_list,
                                           struct drm_i915_gem_request,

                ret = i915_wait_request(dev, request->seqno);

                /* if waiting caused an object to become inactive,
                 * then loop around and wait for it. Otherwise, we
                 * assume that waiting freed and unbound something,
                 * so there should now be some space in the GTT
                 */
                if (!list_empty(&dev_priv->mm.inactive_list))

        /* If we didn't have anything on the request list but there
         * are buffers awaiting a flush, emit one and try again.
         * When we wait on it, those buffers waiting for that flush
         * will get moved to inactive.
         */
        if (!list_empty(&dev_priv->mm.flushing_list)) {
                obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
                                            struct drm_i915_gem_object,
                obj = obj_priv->obj;

                i915_add_request(dev, obj->write_domain);

        DRM_ERROR("inactive empty %d request empty %d "
                  "flushing empty %d\n",
                  list_empty(&dev_priv->mm.inactive_list),
                  list_empty(&dev_priv->mm.request_list),
                  list_empty(&dev_priv->mm.flushing_list));
        /* If we didn't do any of the above, there's nothing to be done
         * and we just can't fit it in.
         */
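        /* Summary of the strategy above: (1) unbind an inactive buffer if one
         * exists, (2) otherwise wait on the oldest outstanding request so
         * something becomes inactive, (3) otherwise emit a flush for the
         * flushing list and wait on that.  Only when all three lists are
         * empty do we give up.
         */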
i915_gem_object_get_page_list(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct address_space *mapping;
        struct inode *inode;

        if (obj_priv->page_list)

        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         */
        page_count = obj->size / PAGE_SIZE;
        BUG_ON(obj_priv->page_list != NULL);
        obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
        if (obj_priv->page_list == NULL) {
                DRM_ERROR("Failed to allocate page list\n");

        inode = obj->filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = read_mapping_page(mapping, i, NULL);
                        ret = PTR_ERR(page);
                        DRM_ERROR("read_mapping_page failed: %d\n", ret);
                        i915_gem_object_free_page_list(obj);
                obj_priv->page_list[i] = page;
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_mm_node *free_space;
        int page_count, ret;

                alignment = PAGE_SIZE;
        if (alignment & (PAGE_SIZE - 1)) {
                DRM_ERROR("Invalid object alignment requested %u\n", alignment);

        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        obj->size, alignment, 0);
        if (free_space != NULL) {
                obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
                if (obj_priv->gtt_space != NULL) {
                        obj_priv->gtt_space->private = obj;
                        obj_priv->gtt_offset = obj_priv->gtt_space->start;

        if (obj_priv->gtt_space == NULL) {
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
                DRM_INFO("%s: GTT full, evicting something\n", __func__);

                if (list_empty(&dev_priv->mm.inactive_list) &&
                    list_empty(&dev_priv->mm.flushing_list) &&
                    list_empty(&dev_priv->mm.active_list)) {
                        DRM_ERROR("GTT full, but LRU list empty\n");

                ret = i915_gem_evict_something(dev);
                        DRM_ERROR("Failed to evict a buffer %d\n", ret);

        DRM_INFO("Binding object of size %d at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);

        ret = i915_gem_object_get_page_list(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;

        page_count = obj->size / PAGE_SIZE;
        /* Create an AGP memory structure pointing at our pages, and bind it
         */
        obj_priv->agp_mem = drm_agp_bind_pages(dev,
                                               obj_priv->page_list,
                                               obj_priv->gtt_offset,
                                               obj_priv->agp_type);
        if (obj_priv->agp_mem == NULL) {
                i915_gem_object_free_page_list(obj);
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;

        atomic_inc(&dev->gtt_count);
        atomic_add(obj->size, &dev->gtt_memory);

        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         */
        BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
        BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
i915_gem_clflush_object(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
        if (obj_priv->page_list == NULL)

        drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 * 5. Unmapped from GTT
 *
 * Let's take these a step at a time
 *
 *	Pages allocated from the kernel may still have
 *	cache contents, so we set them to (CPU, CPU) always.
 * 2. Written by CPU (using pwrite)
 *	The pwrite function calls set_domain (CPU, CPU) and
 *	this function does nothing (as nothing changes)
 *	This function asserts that the object is not
 *	currently in any GPU-based read or write domains
 *	i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *	As write_domain is zero, this function adds in the
 *	current read domains (CPU+COMMAND, 0).
 *	flush_domains is set to CPU.
 *	invalidate_domains is set to COMMAND
 *	clflush is run to get data out of the CPU caches
 *	then i915_dev_set_domain calls i915_gem_flush to
 *	emit an MI_FLUSH and drm_agp_chipset_flush
 * 5. Unmapped from GTT
 *	i915_gem_object_unbind calls set_domain (CPU, CPU)
 *	flush_domains and invalidate_domains end up both zero
 *	so no flushing/invalidating happens
 *
 * Case 2: The shared render buffer
 *
 * 3. Read/written by GPU
 * 4. set_domain to (CPU,CPU)
 * 5. Read/written by CPU
 * 6. Read/written by GPU
 *
 *	Same as last example, (CPU, CPU)
 *	Nothing changes (assertions find that it is not in the GPU)
 * 3. Read/written by GPU
 *	execbuffer calls set_domain (RENDER, RENDER)
 *	flush_domains gets CPU
 *	invalidate_domains gets GPU
 *	MI_FLUSH and drm_agp_chipset_flush
 * 4. set_domain (CPU, CPU)
 *	flush_domains gets GPU
 *	invalidate_domains gets CPU
 *	wait_rendering (obj) to make sure all drawing is complete.
 *	This will include an MI_FLUSH to get the data from GPU
 *	clflush (obj) to invalidate the CPU cache
 *	Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 * 5. Read/written by CPU
 *	cache lines are loaded and dirtied
 * 6. Read/written by GPU
 *	Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 * 4. Updated (written) by CPU again
 *
 *	flush_domains = CPU
 *	invalidate_domains = RENDER
 *	drm_agp_chipset_flush
 * 4. Updated (written) by CPU again
 *	flush_domains = 0 (no previous write domain)
 *	invalidate_domains = 0 (no new read domains)
 *	flush_domains = CPU
 *	invalidate_domains = RENDER
 *	drm_agp_chipset_flush
 */
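/* The rule the cases above boil down to: when the object's current write
 * domain is not among the new read domains it gets added to flush_domains,
 * and any read domain the object is newly entering gets added to
 * invalidate_domains.  These are accumulated in dev->flush_domains and
 * dev->invalidate_domains and performed in one go by
 * i915_gem_dev_set_domain().
 */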
i915_gem_object_set_domain(struct drm_gem_object *obj,
                           uint32_t read_domains,
                           uint32_t write_domain)
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        uint32_t invalidate_domains = 0;
        uint32_t flush_domains = 0;

        DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
                 obj->read_domains, read_domains,
                 obj->write_domain, write_domain);

        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         */
        if (write_domain == 0)
                read_domains |= obj->read_domains;
        else
                obj_priv->dirty = 1;

        /*
         * Flush the current write domain if
         * the new read domains don't match. Invalidate
         * any read domains which differ from the old
         */
        if (obj->write_domain && obj->write_domain != read_domains) {
                flush_domains |= obj->write_domain;
                invalidate_domains |= read_domains & ~obj->write_domain;
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
        invalidate_domains |= read_domains & ~obj->read_domains;
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
                DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
                         __func__, flush_domains, invalidate_domains);
                /*
                 * If we're invalidating the CPU cache and flushing a GPU cache,
                 * then pause for rendering so that the GPU caches will be
                 * flushed before the cpu cache is invalidated
                 */
                if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
                    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
                                       I915_GEM_DOMAIN_GTT))) {
                        ret = i915_gem_object_wait_rendering(obj);
                i915_gem_clflush_object(obj);

        if ((write_domain | flush_domains) != 0)
                obj->write_domain = write_domain;

        /* If we're invalidating the CPU domain, clear the per-page CPU
         * domain list as well.
         */
        if (obj_priv->page_cpu_valid != NULL &&
            (write_domain != 0 ||
             read_domains & I915_GEM_DOMAIN_CPU)) {
                drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
                obj_priv->page_cpu_valid = NULL;

        obj->read_domains = read_domains;

        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;

        DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
                 obj->read_domains, obj->write_domain,
                 dev->invalidate_domains, dev->flush_domains);
/**
 * Set the read/write domain on a range of the object.
 *
 * Currently only implemented for CPU reads, otherwise drops to normal
 * i915_gem_object_set_domain().
 */
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
                                 uint64_t offset,
                                 uint64_t size,
                                 uint32_t read_domains,
                                 uint32_t write_domain)
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        if (obj->read_domains & I915_GEM_DOMAIN_CPU)

        if (read_domains != I915_GEM_DOMAIN_CPU ||
                return i915_gem_object_set_domain(obj,
                                                  read_domains, write_domain);

        /* Wait on any GPU rendering to the object to be flushed. */
        ret = i915_gem_object_wait_rendering(obj);

        if (obj_priv->page_cpu_valid == NULL) {
                obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,

        /* Flush the cache on any pages that are still invalid from the CPU's
         */
        for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
                if (obj_priv->page_cpu_valid[i])

                drm_clflush_pages(obj_priv->page_list + i, 1);

                obj_priv->page_cpu_valid[i] = 1;
/**
 * Once all of the objects have been set in the proper domain,
 * perform the necessary flush and invalidate operations.
 *
 * Returns the write domains flushed, for use in flush tracking.
 */
i915_gem_dev_set_domain(struct drm_device *dev)
        uint32_t flush_domains = dev->flush_domains;

        /*
         * Now that all the buffers are synced to the proper domains,
         * flush and invalidate the collected domains
         */
        if (dev->invalidate_domains | dev->flush_domains) {
                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
                         dev->invalidate_domains,
                         dev->flush_domains);
                i915_gem_flush(dev,
                               dev->invalidate_domains,
                               dev->flush_domains);
                dev->invalidate_domains = 0;
                dev->flush_domains = 0;

        return flush_domains;
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
                                 struct drm_file *file_priv,
                                 struct drm_i915_gem_exec_object *entry)
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_relocation_entry reloc;
        struct drm_i915_gem_relocation_entry __user *relocs;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        void __iomem *reloc_page;

        /* Choose the GTT offset for our buffer and put it there. */
        ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);

        entry->offset = obj_priv->gtt_offset;

        relocs = (struct drm_i915_gem_relocation_entry __user *)
                 (uintptr_t) entry->relocs_ptr;
        /* Apply the relocations, using the GTT aperture to avoid cache
         * flushing requirements.
         */
        for (i = 0; i < entry->relocation_count; i++) {
                struct drm_gem_object *target_obj;
                struct drm_i915_gem_object *target_obj_priv;
                uint32_t reloc_val, reloc_offset;
                uint32_t __iomem *reloc_entry;

                ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
                        i915_gem_object_unpin(obj);

                target_obj = drm_gem_object_lookup(obj->dev, file_priv,
                                                   reloc.target_handle);
                if (target_obj == NULL) {
                        i915_gem_object_unpin(obj);

                target_obj_priv = target_obj->driver_private;

                /* The target buffer should have appeared before us in the
                 * exec_object list, so it should have a GTT space bound by now.
                 */
                if (target_obj_priv->gtt_space == NULL) {
                        DRM_ERROR("No GTT space found for object %d\n",
                                  reloc.target_handle);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);

                if (reloc.offset > obj->size - 4) {
                        DRM_ERROR("Relocation beyond object bounds: "
                                  "obj %p target %d offset %d size %d.\n",
                                  obj, reloc.target_handle,
                                  (int) reloc.offset, (int) obj->size);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);

                if (reloc.offset & 3) {
                        DRM_ERROR("Relocation not 4-byte aligned: "
                                  "obj %p target %d offset %d.\n",
                                  obj, reloc.target_handle,
                                  (int) reloc.offset);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);

                if (reloc.write_domain && target_obj->pending_write_domain &&
                    reloc.write_domain != target_obj->pending_write_domain) {
                        DRM_ERROR("Write domain conflict: "
                                  "obj %p target %d offset %d "
                                  "new %08x old %08x\n",
                                  obj, reloc.target_handle,
                                  target_obj->pending_write_domain);
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);

                DRM_INFO("%s: obj %p offset %08x target %d "
                         "read %08x write %08x gtt %08x "
                         "presumed %08x delta %08x\n",
                         (int) reloc.target_handle,
                         (int) reloc.read_domains,
                         (int) reloc.write_domain,
                         (int) target_obj_priv->gtt_offset,
                         (int) reloc.presumed_offset,

                target_obj->pending_read_domains |= reloc.read_domains;
                target_obj->pending_write_domain |= reloc.write_domain;

                /* If the relocation already has the right value in it, no
                 * more work needs to be done.
                 */
                if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
                        drm_gem_object_unreference(target_obj);

                /* Now that we're going to actually write some data in,
                 * make sure that any rendering using this buffer's contents
                 */
                i915_gem_object_wait_rendering(obj);

                /* As we're writing through the gtt, flush
                 * any CPU writes before we write the relocations
                 */
                if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
                        i915_gem_clflush_object(obj);
                        drm_agp_chipset_flush(dev);
                        obj->write_domain = 0;

                /* Map the page containing the relocation we're going to
                 */
                reloc_offset = obj_priv->gtt_offset + reloc.offset;
                reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                reloc_entry = (uint32_t __iomem *)(reloc_page +
                                                   (reloc_offset & (PAGE_SIZE - 1)));
                reloc_val = target_obj_priv->gtt_offset + reloc.delta;

                DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
                         obj, (unsigned int) reloc.offset,
                         readl(reloc_entry), reloc_val);
                writel(reloc_val, reloc_entry);
                io_mapping_unmap_atomic(reloc_page);

                /* Write the updated presumed offset for this entry back out
                 */
                reloc.presumed_offset = target_obj_priv->gtt_offset;
                ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);

                drm_gem_object_unreference(target_obj);

        i915_gem_dump_object(obj, 128, __func__, ~0);
/** Dispatch a batchbuffer to the ring
 */
i915_dispatch_gem_execbuffer(struct drm_device *dev,
                             struct drm_i915_gem_execbuffer *exec,
                             uint64_t exec_offset)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
                                             (uintptr_t) exec->cliprects_ptr;
        int nbox = exec->num_cliprects;
        uint32_t exec_start, exec_len;

        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;

        if ((exec_start | exec_len) & 0x7) {
                DRM_ERROR("alignment\n");

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                        int ret = i915_emit_box(dev, boxes, i,
                                                exec->DR1, exec->DR4);

                if (IS_I830(dev) || IS_845G(dev)) {
                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(exec_start | MI_BATCH_NON_SECURE);
                        OUT_RING(exec_start + exec_len - 4);
                        if (IS_I965G(dev)) {
                                OUT_RING(MI_BATCH_BUFFER_START |
                                         MI_BATCH_NON_SECURE_I965);
                                OUT_RING(exec_start);
                                OUT_RING(MI_BATCH_BUFFER_START |
                                OUT_RING(exec_start | MI_BATCH_NON_SECURE);

        /* XXX breadcrumb */
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
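/* Note that each call waits on the seqno recorded at the *previous*
 * throttle call, so a client that throttles once per frame is allowed to
 * run roughly one frame ahead of the GPU.
 */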
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
        struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

        mutex_lock(&dev->struct_mutex);
        seqno = i915_file_priv->mm.last_gem_throttle_seqno;
        i915_file_priv->mm.last_gem_throttle_seqno =
                i915_file_priv->mm.last_gem_seqno;
        ret = i915_wait_request(dev, seqno);
        mutex_unlock(&dev->struct_mutex);
i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_gem_object **object_list = NULL;
        struct drm_gem_object *batch_obj;
        int ret, i, pinned = 0;
        uint64_t exec_offset;
        uint32_t seqno, flush_domains;

        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                 (int) args->buffers_ptr, args->buffer_count, args->batch_len);

        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);

        /* Copy in the exec list from userland */
        exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
        object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
        if (exec_list == NULL || object_list == NULL) {
                DRM_ERROR("Failed to allocate exec or object list "
                          args->buffer_count);

        ret = copy_from_user(exec_list,
                             (struct drm_i915_relocation_entry __user *)
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec_list) * args->buffer_count);
                DRM_ERROR("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);

        mutex_lock(&dev->struct_mutex);

        i915_verify_inactive(dev, __FILE__, __LINE__);

        if (dev_priv->mm.wedged) {
                DRM_ERROR("Execbuf while wedged\n");
                mutex_unlock(&dev->struct_mutex);

        if (dev_priv->mm.suspended) {
                DRM_ERROR("Execbuf while VT-switched.\n");
                mutex_unlock(&dev->struct_mutex);

        /* Zero the global flush/invalidate flags. These
         * will be modified as each object is bound to the
         */
        dev->invalidate_domains = 0;
        dev->flush_domains = 0;

        /* Look up object handles and perform the relocations */
        for (i = 0; i < args->buffer_count; i++) {
                object_list[i] = drm_gem_object_lookup(dev, file_priv,
                                                       exec_list[i].handle);
                if (object_list[i] == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                                  exec_list[i].handle, i);

                object_list[i]->pending_read_domains = 0;
                object_list[i]->pending_write_domain = 0;
                ret = i915_gem_object_pin_and_relocate(object_list[i],
                        DRM_ERROR("object bind and relocate failed %d\n", ret);

        /* Set the pending read domains for the batch buffer to COMMAND */
        batch_obj = object_list[args->buffer_count-1];
        batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
        batch_obj->pending_write_domain = 0;

        i915_verify_inactive(dev, __FILE__, __LINE__);

        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
                struct drm_i915_gem_object *obj_priv = obj->driver_private;

                if (obj_priv->gtt_space == NULL) {
                        /* We evicted the buffer in the process of validating
                         * our set of buffers in.  We could try to recover by
                         * kicking everything out and trying again from
                         */

                /* make sure all previous memory operations have passed */
                ret = i915_gem_object_set_domain(obj,
                                                 obj->pending_read_domains,
                                                 obj->pending_write_domain);

        i915_verify_inactive(dev, __FILE__, __LINE__);

        /* Flush/invalidate caches and chipset buffer */
        flush_domains = i915_gem_dev_set_domain(dev);

        i915_verify_inactive(dev, __FILE__, __LINE__);

        for (i = 0; i < args->buffer_count; i++) {
                i915_gem_object_check_coherency(object_list[i],
                                                exec_list[i].handle);

        exec_offset = exec_list[args->buffer_count - 1].offset;

        i915_gem_dump_object(object_list[args->buffer_count - 1],

        (void)i915_add_request(dev, flush_domains);

        /* Exec the batchbuffer */
        ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
                DRM_ERROR("dispatch failed %d\n", ret);

        /*
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires
         */
        flush_domains = i915_retire_commands(dev);

        i915_verify_inactive(dev, __FILE__, __LINE__);

        /*
         * Get a seqno representing the execution of the current buffer,
         * which we can wait on.  We would like to mitigate these interrupts,
         * likely by only creating seqnos occasionally (so that we have
         * *some* interrupts representing completion of buffers that we can
         * wait on when trying to clear up gtt space).
         */
        seqno = i915_add_request(dev, flush_domains);

        i915_file_priv->mm.last_gem_seqno = seqno;
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
                struct drm_i915_gem_object *obj_priv = obj->driver_private;

                i915_gem_object_move_to_active(obj);
                obj_priv->last_rendering_seqno = seqno;

                DRM_INFO("%s: move to exec list %p\n", __func__, obj);

        i915_dump_lru(dev, __func__);

        i915_verify_inactive(dev, __FILE__, __LINE__);

        /* Copy the new buffer offsets back to the user's exec list. */
        ret = copy_to_user((struct drm_i915_relocation_entry __user *)
                           (uintptr_t) args->buffers_ptr,
                           sizeof(*exec_list) * args->buffer_count);
                DRM_ERROR("failed to copy %d exec entries "
                          "back to user (%d)\n",
                          args->buffer_count, ret);

        if (object_list != NULL) {
                for (i = 0; i < pinned; i++)
                        i915_gem_object_unpin(object_list[i]);

                for (i = 0; i < args->buffer_count; i++)
                        drm_gem_object_unreference(object_list[i]);

        mutex_unlock(&dev->struct_mutex);

        drm_free(object_list, sizeof(*object_list) * args->buffer_count,
        drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->gtt_space == NULL) {
                ret = i915_gem_object_bind_to_gtt(obj, alignment);
                        DRM_ERROR("Failure to bind: %d", ret);

        obj_priv->pin_count++;

        /* If the object is not active and not pending a flush,
         * remove it from the inactive list
         */
        if (obj_priv->pin_count == 1) {
                atomic_inc(&dev->pin_count);
                atomic_add(obj->size, &dev->pin_memory);
                if (!obj_priv->active &&
                    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
                                           I915_GEM_DOMAIN_GTT)) == 0 &&
                    !list_empty(&obj_priv->list))
                        list_del_init(&obj_priv->list);

        i915_verify_inactive(dev, __FILE__, __LINE__);
i915_gem_object_unpin(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        i915_verify_inactive(dev, __FILE__, __LINE__);
        obj_priv->pin_count--;
        BUG_ON(obj_priv->pin_count < 0);
        BUG_ON(obj_priv->gtt_space == NULL);

        /* If the object is no longer pinned, and is
         * neither active nor being flushed, then stick it on
         */
        if (obj_priv->pin_count == 0) {
                if (!obj_priv->active &&
                    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
                                           I915_GEM_DOMAIN_GTT)) == 0)
                        list_move_tail(&obj_priv->list,
                                       &dev_priv->mm.inactive_list);
                atomic_dec(&dev->pin_count);
                atomic_sub(obj->size, &dev->pin_memory);

        i915_verify_inactive(dev, __FILE__, __LINE__);
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
        struct drm_i915_gem_pin *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
                DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
                mutex_unlock(&dev->struct_mutex);

        obj_priv = obj->driver_private;

        ret = i915_gem_object_pin(obj, args->alignment);
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);

        /* XXX - flush the CPU caches for pinned objects
         * as the X server doesn't manage domains yet
         */
        if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
                i915_gem_clflush_object(obj);
                drm_agp_chipset_flush(dev);
                obj->write_domain = 0;

        args->offset = obj_priv->gtt_offset;
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
        struct drm_i915_gem_pin *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
                DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
                mutex_unlock(&dev->struct_mutex);

        i915_gem_object_unpin(obj);

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
                DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
                mutex_unlock(&dev->struct_mutex);

        obj_priv = obj->driver_private;
        args->busy = obj_priv->active;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
        return i915_gem_ring_throttle(dev, file_priv);
int i915_gem_init_object(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv;

        obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
        if (obj_priv == NULL)

        /*
         * We've just allocated pages from the kernel,
         * so they've just been written by the CPU with
         * zeros. They'll need to be clflushed before we
         * use them with the GPU.
         */
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;

        obj_priv->agp_type = AGP_USER_MEMORY;

        obj->driver_private = obj_priv;
        obj_priv->obj = obj;
        INIT_LIST_HEAD(&obj_priv->list);
void i915_gem_free_object(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        while (obj_priv->pin_count > 0)
                i915_gem_object_unpin(obj);

        i915_gem_object_unbind(obj);

        drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
        drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
i915_gem_set_domain(struct drm_gem_object *obj,
                    struct drm_file *file_priv,
                    uint32_t read_domains,
                    uint32_t write_domain)
        struct drm_device *dev = obj->dev;
        uint32_t flush_domains;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        ret = i915_gem_object_set_domain(obj, read_domains, write_domain);

        flush_domains = i915_gem_dev_set_domain(obj->dev);

        if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
                (void) i915_add_request(dev, flush_domains);
/** Unbinds all objects that are on the given buffer list. */
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        while (!list_empty(head)) {
                obj_priv = list_first_entry(head,
                                            struct drm_i915_gem_object,
                obj = obj_priv->obj;

                if (obj_priv->pin_count != 0) {
                        DRM_ERROR("Pinned object in unbind list\n");
                        mutex_unlock(&dev->struct_mutex);

                ret = i915_gem_object_unbind(obj);
                        DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
                        mutex_unlock(&dev->struct_mutex);
2244 i915_gem_idle(struct drm_device
*dev
)
2246 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2247 uint32_t seqno
, cur_seqno
, last_seqno
;
2250 mutex_lock(&dev
->struct_mutex
);
2252 if (dev_priv
->mm
.suspended
|| dev_priv
->ring
.ring_obj
== NULL
) {
2253 mutex_unlock(&dev
->struct_mutex
);
2257 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2258 * We need to replace this with a semaphore, or something.
2260 dev_priv
->mm
.suspended
= 1;
2262 /* Cancel the retire work handler, wait for it to finish if running
2264 mutex_unlock(&dev
->struct_mutex
);
2265 cancel_delayed_work_sync(&dev_priv
->mm
.retire_work
);
2266 mutex_lock(&dev
->struct_mutex
);
2268 i915_kernel_lost_context(dev
);
2270 /* Flush the GPU along with all non-CPU write domains
2272 i915_gem_flush(dev
, ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
),
2273 ~(I915_GEM_DOMAIN_CPU
|I915_GEM_DOMAIN_GTT
));
2274 seqno
= i915_add_request(dev
, ~(I915_GEM_DOMAIN_CPU
|
2275 I915_GEM_DOMAIN_GTT
));
2278 mutex_unlock(&dev
->struct_mutex
);
2282 dev_priv
->mm
.waiting_gem_seqno
= seqno
;
2286 cur_seqno
= i915_get_gem_seqno(dev
);
2287 if (i915_seqno_passed(cur_seqno
, seqno
))
2289 if (last_seqno
== cur_seqno
) {
2290 if (stuck
++ > 100) {
2291 DRM_ERROR("hardware wedged\n");
2292 dev_priv
->mm
.wedged
= 1;
2293 DRM_WAKEUP(&dev_priv
->irq_queue
);
2298 last_seqno
= cur_seqno
;
2300 dev_priv
->mm
.waiting_gem_seqno
= 0;
2302 i915_gem_retire_requests(dev
);

	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive.  If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there.  So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
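
/**
 * Allocates and pins a GEM object for the hardware status page and points
 * HWS_PGA at it. Chipsets that use a physical (non-GTT) status page are
 * already set up at driver load time and are skipped here.
 */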
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
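
/**
 * Sets up the render ring: initializes the status page, allocates and pins
 * a 128KB ring object, maps it write-combined, programs PRB0 and verifies
 * that the head register actually reset to zero.
 */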
static int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	dev_priv->ring.Size = obj->size;
	dev_priv->ring.tail_mask = obj->size - 1;

	dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
	dev_priv->ring.map.size = obj->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->ring.ring_obj = obj;
	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	i915_kernel_lost_context(dev);

	return 0;
}
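
/**
 * Tears down the ring buffer and hardware status page set up by
 * i915_gem_init_ringbuffer(), unpinning and releasing both objects.
 */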
static void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	if (dev_priv->hws_obj != NULL) {
		struct drm_gem_object *obj = dev_priv->hws_obj;
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		kunmap(obj_priv->page_list[0]);
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		dev_priv->hws_obj = NULL;
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		dev_priv->hw_status_page = NULL;

		/* Write high address into HWS_PGA when disabling. */
		I915_WRITE(HWS_PGA, 0x1ffff000);
	}
}
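
/**
 * ioctl handler for VT enter: clears a previous wedged state, reinitializes
 * the ring buffer, maps the aperture write-combined and re-enables execbuf
 * by clearing mm.suspended.
 */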
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0)
		return ret;

	dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
							dev->agp->agp_info.aper_size
							* 1024 * 1024);

	mutex_lock(&dev->struct_mutex);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	dev_priv->mm.suspended = 0;
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}
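
/**
 * ioctl handler for VT leave: idles the GPU, uninstalls the IRQ handler
 * and releases the aperture mapping created on VT enter.
 */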
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	return ret;
}
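
/** Idles the hardware when the last DRM file handle is closed. */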
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
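
/**
 * One-time GEM setup at driver load: initializes the object lists, the
 * retire work handler, the initial seqno and bit-6 swizzle detection.
 */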
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	i915_gem_detect_bit_6_swizzle(dev);
}