/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
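/* Note: I915_GEM_GPU_DOMAINS above is simply the complement of the CPU and
 * GTT domains, i.e. every remaining domain bit is treated as a GPU cache
 * for flush/invalidate purposes.
 */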
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
					unsigned alignment);
static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data, int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}
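/* An object needs the bit-17 aware copy paths when the X swizzle mode mixes
 * physical address bit 17 into the tiling layout: userspace cannot see that
 * bit through the object's pages, so pread/pwrite compensate per page (see
 * slow_shmem_bit17_copy() below).
 */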
static inline int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}
static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback shmem pread path, which pins the user pages with
 * get_user_pages() ahead of time, so we can copy out of the object's
 * backing pages while holding the struct mutex and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference(obj);

	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}
/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}
548 * This is the fast pwrite path, where we copy the data directly from the
549 * user into the GTT, uncached.
552 i915_gem_gtt_pwrite_fast(struct drm_device
*dev
, struct drm_gem_object
*obj
,
553 struct drm_i915_gem_pwrite
*args
,
554 struct drm_file
*file_priv
)
556 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
557 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
559 loff_t offset
, page_base
;
560 char __user
*user_data
;
561 int page_offset
, page_length
;
564 user_data
= (char __user
*) (uintptr_t) args
->data_ptr
;
566 if (!access_ok(VERIFY_READ
, user_data
, remain
))
570 mutex_lock(&dev
->struct_mutex
);
571 ret
= i915_gem_object_pin(obj
, 0);
573 mutex_unlock(&dev
->struct_mutex
);
576 ret
= i915_gem_object_set_to_gtt_domain(obj
, 1);
580 obj_priv
= obj
->driver_private
;
581 offset
= obj_priv
->gtt_offset
+ args
->offset
;
584 /* Operation in this page
586 * page_base = page offset within aperture
587 * page_offset = offset within page
588 * page_length = bytes to copy for this page
590 page_base
= (offset
& ~(PAGE_SIZE
-1));
591 page_offset
= offset
& (PAGE_SIZE
-1);
592 page_length
= remain
;
593 if ((page_offset
+ remain
) > PAGE_SIZE
)
594 page_length
= PAGE_SIZE
- page_offset
;
596 ret
= fast_user_write (dev_priv
->mm
.gtt_mapping
, page_base
,
597 page_offset
, user_data
, page_length
);
599 /* If we get a fault while copying data, then (presumably) our
600 * source page isn't available. Return the error and we'll
601 * retry in the slow path.
606 remain
-= page_length
;
607 user_data
+= page_length
;
608 offset
+= page_length
;
612 i915_gem_object_unpin(obj
);
613 mutex_unlock(&dev
->struct_mutex
);
619 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
620 * the memory and maps it using kmap_atomic for copying.
622 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
623 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
626 i915_gem_gtt_pwrite_slow(struct drm_device
*dev
, struct drm_gem_object
*obj
,
627 struct drm_i915_gem_pwrite
*args
,
628 struct drm_file
*file_priv
)
630 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
631 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
633 loff_t gtt_page_base
, offset
;
634 loff_t first_data_page
, last_data_page
, num_pages
;
635 loff_t pinned_pages
, i
;
636 struct page
**user_pages
;
637 struct mm_struct
*mm
= current
->mm
;
638 int gtt_page_offset
, data_page_offset
, data_page_index
, page_length
;
640 uint64_t data_ptr
= args
->data_ptr
;
644 /* Pin the user pages containing the data. We can't fault while
645 * holding the struct mutex, and all of the pwrite implementations
646 * want to hold it while dereferencing the user data.
648 first_data_page
= data_ptr
/ PAGE_SIZE
;
649 last_data_page
= (data_ptr
+ args
->size
- 1) / PAGE_SIZE
;
650 num_pages
= last_data_page
- first_data_page
+ 1;
652 user_pages
= drm_calloc_large(num_pages
, sizeof(struct page
*));
653 if (user_pages
== NULL
)
656 down_read(&mm
->mmap_sem
);
657 pinned_pages
= get_user_pages(current
, mm
, (uintptr_t)args
->data_ptr
,
658 num_pages
, 0, 0, user_pages
, NULL
);
659 up_read(&mm
->mmap_sem
);
660 if (pinned_pages
< num_pages
) {
662 goto out_unpin_pages
;
665 mutex_lock(&dev
->struct_mutex
);
666 ret
= i915_gem_object_pin(obj
, 0);
670 ret
= i915_gem_object_set_to_gtt_domain(obj
, 1);
672 goto out_unpin_object
;
674 obj_priv
= obj
->driver_private
;
675 offset
= obj_priv
->gtt_offset
+ args
->offset
;
678 /* Operation in this page
680 * gtt_page_base = page offset within aperture
681 * gtt_page_offset = offset within page in aperture
682 * data_page_index = page number in get_user_pages return
683 * data_page_offset = offset with data_page_index page.
684 * page_length = bytes to copy for this page
686 gtt_page_base
= offset
& PAGE_MASK
;
687 gtt_page_offset
= offset
& ~PAGE_MASK
;
688 data_page_index
= data_ptr
/ PAGE_SIZE
- first_data_page
;
689 data_page_offset
= data_ptr
& ~PAGE_MASK
;
691 page_length
= remain
;
692 if ((gtt_page_offset
+ page_length
) > PAGE_SIZE
)
693 page_length
= PAGE_SIZE
- gtt_page_offset
;
694 if ((data_page_offset
+ page_length
) > PAGE_SIZE
)
695 page_length
= PAGE_SIZE
- data_page_offset
;
697 ret
= slow_kernel_write(dev_priv
->mm
.gtt_mapping
,
698 gtt_page_base
, gtt_page_offset
,
699 user_pages
[data_page_index
],
703 /* If we get a fault while copying data, then (presumably) our
704 * source page isn't available. Return the error and we'll
705 * retry in the slow path.
708 goto out_unpin_object
;
710 remain
-= page_length
;
711 offset
+= page_length
;
712 data_ptr
+= page_length
;
716 i915_gem_object_unpin(obj
);
718 mutex_unlock(&dev
->struct_mutex
);
720 for (i
= 0; i
< pinned_pages
; i
++)
721 page_cache_release(user_pages
[i
]);
722 drm_free_large(user_pages
);
728 * This is the fast shmem pwrite path, which attempts to directly
729 * copy_from_user into the kmapped pages backing the object.
732 i915_gem_shmem_pwrite_fast(struct drm_device
*dev
, struct drm_gem_object
*obj
,
733 struct drm_i915_gem_pwrite
*args
,
734 struct drm_file
*file_priv
)
736 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
738 loff_t offset
, page_base
;
739 char __user
*user_data
;
740 int page_offset
, page_length
;
743 user_data
= (char __user
*) (uintptr_t) args
->data_ptr
;
746 mutex_lock(&dev
->struct_mutex
);
748 ret
= i915_gem_object_get_pages(obj
);
752 ret
= i915_gem_object_set_to_cpu_domain(obj
, 1);
756 obj_priv
= obj
->driver_private
;
757 offset
= args
->offset
;
761 /* Operation in this page
763 * page_base = page offset within aperture
764 * page_offset = offset within page
765 * page_length = bytes to copy for this page
767 page_base
= (offset
& ~(PAGE_SIZE
-1));
768 page_offset
= offset
& (PAGE_SIZE
-1);
769 page_length
= remain
;
770 if ((page_offset
+ remain
) > PAGE_SIZE
)
771 page_length
= PAGE_SIZE
- page_offset
;
773 ret
= fast_shmem_write(obj_priv
->pages
,
774 page_base
, page_offset
,
775 user_data
, page_length
);
779 remain
-= page_length
;
780 user_data
+= page_length
;
781 offset
+= page_length
;
785 i915_gem_object_put_pages(obj
);
787 mutex_unlock(&dev
->struct_mutex
);
793 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
794 * the memory and maps it using kmap_atomic for copying.
796 * This avoids taking mmap_sem for faulting on the user's address while the
797 * struct_mutex is held.
800 i915_gem_shmem_pwrite_slow(struct drm_device
*dev
, struct drm_gem_object
*obj
,
801 struct drm_i915_gem_pwrite
*args
,
802 struct drm_file
*file_priv
)
804 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
805 struct mm_struct
*mm
= current
->mm
;
806 struct page
**user_pages
;
808 loff_t offset
, pinned_pages
, i
;
809 loff_t first_data_page
, last_data_page
, num_pages
;
810 int shmem_page_index
, shmem_page_offset
;
811 int data_page_index
, data_page_offset
;
814 uint64_t data_ptr
= args
->data_ptr
;
815 int do_bit17_swizzling
;
819 /* Pin the user pages containing the data. We can't fault while
820 * holding the struct mutex, and all of the pwrite implementations
821 * want to hold it while dereferencing the user data.
823 first_data_page
= data_ptr
/ PAGE_SIZE
;
824 last_data_page
= (data_ptr
+ args
->size
- 1) / PAGE_SIZE
;
825 num_pages
= last_data_page
- first_data_page
+ 1;
827 user_pages
= drm_calloc_large(num_pages
, sizeof(struct page
*));
828 if (user_pages
== NULL
)
831 down_read(&mm
->mmap_sem
);
832 pinned_pages
= get_user_pages(current
, mm
, (uintptr_t)args
->data_ptr
,
833 num_pages
, 0, 0, user_pages
, NULL
);
834 up_read(&mm
->mmap_sem
);
835 if (pinned_pages
< num_pages
) {
837 goto fail_put_user_pages
;
840 do_bit17_swizzling
= i915_gem_object_needs_bit17_swizzle(obj
);
842 mutex_lock(&dev
->struct_mutex
);
844 ret
= i915_gem_object_get_pages(obj
);
848 ret
= i915_gem_object_set_to_cpu_domain(obj
, 1);
852 obj_priv
= obj
->driver_private
;
853 offset
= args
->offset
;
857 /* Operation in this page
859 * shmem_page_index = page number within shmem file
860 * shmem_page_offset = offset within page in shmem file
861 * data_page_index = page number in get_user_pages return
862 * data_page_offset = offset with data_page_index page.
863 * page_length = bytes to copy for this page
865 shmem_page_index
= offset
/ PAGE_SIZE
;
866 shmem_page_offset
= offset
& ~PAGE_MASK
;
867 data_page_index
= data_ptr
/ PAGE_SIZE
- first_data_page
;
868 data_page_offset
= data_ptr
& ~PAGE_MASK
;
870 page_length
= remain
;
871 if ((shmem_page_offset
+ page_length
) > PAGE_SIZE
)
872 page_length
= PAGE_SIZE
- shmem_page_offset
;
873 if ((data_page_offset
+ page_length
) > PAGE_SIZE
)
874 page_length
= PAGE_SIZE
- data_page_offset
;
876 if (do_bit17_swizzling
) {
877 ret
= slow_shmem_bit17_copy(obj_priv
->pages
[shmem_page_index
],
879 user_pages
[data_page_index
],
884 ret
= slow_shmem_copy(obj_priv
->pages
[shmem_page_index
],
886 user_pages
[data_page_index
],
893 remain
-= page_length
;
894 data_ptr
+= page_length
;
895 offset
+= page_length
;
899 i915_gem_object_put_pages(obj
);
901 mutex_unlock(&dev
->struct_mutex
);
903 for (i
= 0; i
< pinned_pages
; i
++)
904 page_cache_release(user_pages
[i
]);
905 drm_free_large(user_pages
);
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}
975 * Called when user space prepares to use an object with the CPU, either
976 * through the mmap ioctl's mapping or a GTT mapping.
979 i915_gem_set_domain_ioctl(struct drm_device
*dev
, void *data
,
980 struct drm_file
*file_priv
)
982 struct drm_i915_gem_set_domain
*args
= data
;
983 struct drm_gem_object
*obj
;
984 uint32_t read_domains
= args
->read_domains
;
985 uint32_t write_domain
= args
->write_domain
;
988 if (!(dev
->driver
->driver_features
& DRIVER_GEM
))
991 /* Only handle setting domains to types used by the CPU. */
992 if (write_domain
& I915_GEM_GPU_DOMAINS
)
995 if (read_domains
& I915_GEM_GPU_DOMAINS
)
998 /* Having something in the write domain implies it's in the read
999 * domain, and only that read domain. Enforce that in the request.
1001 if (write_domain
!= 0 && read_domains
!= write_domain
)
1004 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
1008 mutex_lock(&dev
->struct_mutex
);
1010 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
1011 obj
, obj
->size
, read_domains
, write_domain
);
1013 if (read_domains
& I915_GEM_DOMAIN_GTT
) {
1014 ret
= i915_gem_object_set_to_gtt_domain(obj
, write_domain
!= 0);
1016 /* Silently promote "you're not bound, there was nothing to do"
1017 * to success, since the client was just asking us to
1018 * make sure everything was done.
1023 ret
= i915_gem_object_set_to_cpu_domain(obj
, write_domain
!= 0);
1026 drm_gem_object_unreference(obj
);
1027 mutex_unlock(&dev
->struct_mutex
);
1032 * Called when user space has done writes to this buffer
1035 i915_gem_sw_finish_ioctl(struct drm_device
*dev
, void *data
,
1036 struct drm_file
*file_priv
)
1038 struct drm_i915_gem_sw_finish
*args
= data
;
1039 struct drm_gem_object
*obj
;
1040 struct drm_i915_gem_object
*obj_priv
;
1043 if (!(dev
->driver
->driver_features
& DRIVER_GEM
))
1046 mutex_lock(&dev
->struct_mutex
);
1047 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
1049 mutex_unlock(&dev
->struct_mutex
);
1054 DRM_INFO("%s: sw_finish %d (%p %d)\n",
1055 __func__
, args
->handle
, obj
, obj
->size
);
1057 obj_priv
= obj
->driver_private
;
1059 /* Pinned buffers may be scanout, so flush the cache */
1060 if (obj_priv
->pin_count
)
1061 i915_gem_object_flush_cpu_write_domain(obj
);
1063 drm_gem_object_unreference(obj
);
1064 mutex_unlock(&dev
->struct_mutex
);
1069 * Maps the contents of an object, returning the address it is mapped
1072 * While the mapping holds a reference on the contents of the object, it doesn't
1073 * imply a ref on the object itself.
1076 i915_gem_mmap_ioctl(struct drm_device
*dev
, void *data
,
1077 struct drm_file
*file_priv
)
1079 struct drm_i915_gem_mmap
*args
= data
;
1080 struct drm_gem_object
*obj
;
1084 if (!(dev
->driver
->driver_features
& DRIVER_GEM
))
1087 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
1091 offset
= args
->offset
;
1093 down_write(¤t
->mm
->mmap_sem
);
1094 addr
= do_mmap(obj
->filp
, 0, args
->size
,
1095 PROT_READ
| PROT_WRITE
, MAP_SHARED
,
1097 up_write(¤t
->mm
->mmap_sem
);
1098 mutex_lock(&dev
->struct_mutex
);
1099 drm_gem_object_unreference(obj
);
1100 mutex_unlock(&dev
->struct_mutex
);
1101 if (IS_ERR((void *)addr
))
1104 args
->addr_ptr
= (uint64_t) addr
;
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
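/* Flow of the handler below: bind the object into the GTT if it has no
 * gtt_space, move it to the GTT domain, allocate a fence register for tiled
 * objects, then vm_insert_pfn() the aperture page.  vm_insert_pfn() installs
 * the PTE itself, which is why the success path returns VM_FAULT_NOPAGE
 * rather than handing a struct page back to the fault core.
 */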
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}

		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	/* Need a new fence register? */
	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, write);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	case -EFAULT:
	case -EINVAL:
		return VM_FAULT_SIGBUS;
	default:
		return VM_FAULT_NOPAGE;
	}
}
1189 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1190 * @obj: obj in question
1192 * GEM memory mapping works by handing back to userspace a fake mmap offset
1193 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1194 * up the object based on the offset and sets up the various memory mapping
1197 * This routine allocates and attaches a fake offset for @obj.
1200 i915_gem_create_mmap_offset(struct drm_gem_object
*obj
)
1202 struct drm_device
*dev
= obj
->dev
;
1203 struct drm_gem_mm
*mm
= dev
->mm_private
;
1204 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1205 struct drm_map_list
*list
;
1206 struct drm_local_map
*map
;
1209 /* Set the object up for mmap'ing */
1210 list
= &obj
->map_list
;
1211 list
->map
= drm_calloc(1, sizeof(struct drm_map_list
),
1217 map
->type
= _DRM_GEM
;
1218 map
->size
= obj
->size
;
1221 /* Get a DRM GEM mmap offset allocated... */
1222 list
->file_offset_node
= drm_mm_search_free(&mm
->offset_manager
,
1223 obj
->size
/ PAGE_SIZE
, 0, 0);
1224 if (!list
->file_offset_node
) {
1225 DRM_ERROR("failed to allocate offset for bo %d\n", obj
->name
);
1230 list
->file_offset_node
= drm_mm_get_block(list
->file_offset_node
,
1231 obj
->size
/ PAGE_SIZE
, 0);
1232 if (!list
->file_offset_node
) {
1237 list
->hash
.key
= list
->file_offset_node
->start
;
1238 if (drm_ht_insert_item(&mm
->offset_hash
, &list
->hash
)) {
1239 DRM_ERROR("failed to add to map hash\n");
1243 /* By now we should be all set, any drm_mmap request on the offset
1244 * below will get to our mmap & fault handler */
1245 obj_priv
->mmap_offset
= ((uint64_t) list
->hash
.key
) << PAGE_SHIFT
;
1250 drm_mm_put_block(list
->file_offset_node
);
1252 drm_free(list
->map
, sizeof(struct drm_map_list
), DRM_MEM_DRIVER
);
1258 i915_gem_free_mmap_offset(struct drm_gem_object
*obj
)
1260 struct drm_device
*dev
= obj
->dev
;
1261 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1262 struct drm_gem_mm
*mm
= dev
->mm_private
;
1263 struct drm_map_list
*list
;
1265 list
= &obj
->map_list
;
1266 drm_ht_remove_item(&mm
->offset_hash
, &list
->hash
);
1268 if (list
->file_offset_node
) {
1269 drm_mm_put_block(list
->file_offset_node
);
1270 list
->file_offset_node
= NULL
;
1274 drm_free(list
->map
, sizeof(struct drm_map
), DRM_MEM_DRIVER
);
1278 obj_priv
->mmap_offset
= 0;
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
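/* Example (assuming the usual pre-965 minimum fence sizes set up above):
 * a 1.5 MiB tiled object on a 9xx chip is only covered by a 2 MiB fence,
 * so the loop doubles up to 2 MiB and the object is bound at that alignment.
 */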
1318 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1320 * @data: GTT mapping ioctl data
1321 * @file_priv: GEM object info
1323 * Simply returns the fake offset to userspace so it can mmap it.
1324 * The mmap call will end up in drm_gem_mmap(), which will set things
1325 * up so we can get faults in the handler above.
1327 * The fault handler will take care of binding the object into the GTT
1328 * (since it may have been evicted to make room for something), allocating
1329 * a fence register, and mapping the appropriate aperture address into
1333 i915_gem_mmap_gtt_ioctl(struct drm_device
*dev
, void *data
,
1334 struct drm_file
*file_priv
)
1336 struct drm_i915_gem_mmap_gtt
*args
= data
;
1337 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1338 struct drm_gem_object
*obj
;
1339 struct drm_i915_gem_object
*obj_priv
;
1342 if (!(dev
->driver
->driver_features
& DRIVER_GEM
))
1345 obj
= drm_gem_object_lookup(dev
, file_priv
, args
->handle
);
1349 mutex_lock(&dev
->struct_mutex
);
1351 obj_priv
= obj
->driver_private
;
1353 if (!obj_priv
->mmap_offset
) {
1354 ret
= i915_gem_create_mmap_offset(obj
);
1356 drm_gem_object_unreference(obj
);
1357 mutex_unlock(&dev
->struct_mutex
);
1362 args
->offset
= obj_priv
->mmap_offset
;
1364 obj_priv
->gtt_alignment
= i915_gem_get_gtt_alignment(obj
);
1366 /* Make sure the alignment is correct for fence regs etc */
1367 if (obj_priv
->agp_mem
&&
1368 (obj_priv
->gtt_offset
& (obj_priv
->gtt_alignment
- 1))) {
1369 drm_gem_object_unreference(obj
);
1370 mutex_unlock(&dev
->struct_mutex
);
1375 * Pull it into the GTT so that we have a page list (makes the
1376 * initial fault faster and any subsequent flushing possible).
1378 if (!obj_priv
->agp_mem
) {
1379 ret
= i915_gem_object_bind_to_gtt(obj
, obj_priv
->gtt_alignment
);
1381 drm_gem_object_unreference(obj
);
1382 mutex_unlock(&dev
->struct_mutex
);
1385 list_add_tail(&obj_priv
->list
, &dev_priv
->mm
.inactive_list
);
1388 drm_gem_object_unreference(obj
);
1389 mutex_unlock(&dev
->struct_mutex
);
1395 i915_gem_object_put_pages(struct drm_gem_object
*obj
)
1397 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1398 int page_count
= obj
->size
/ PAGE_SIZE
;
1401 BUG_ON(obj_priv
->pages_refcount
== 0);
1403 if (--obj_priv
->pages_refcount
!= 0)
1406 if (obj_priv
->tiling_mode
!= I915_TILING_NONE
)
1407 i915_gem_object_save_bit_17_swizzle(obj
);
1409 for (i
= 0; i
< page_count
; i
++)
1410 if (obj_priv
->pages
[i
] != NULL
) {
1411 if (obj_priv
->dirty
)
1412 set_page_dirty(obj_priv
->pages
[i
]);
1413 mark_page_accessed(obj_priv
->pages
[i
]);
1414 page_cache_release(obj_priv
->pages
[i
]);
1416 obj_priv
->dirty
= 0;
1418 drm_free_large(obj_priv
->pages
);
1419 obj_priv
->pages
= NULL
;
1423 i915_gem_object_move_to_active(struct drm_gem_object
*obj
, uint32_t seqno
)
1425 struct drm_device
*dev
= obj
->dev
;
1426 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1427 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1429 /* Add a reference if we're newly entering the active list. */
1430 if (!obj_priv
->active
) {
1431 drm_gem_object_reference(obj
);
1432 obj_priv
->active
= 1;
1434 /* Move from whatever list we were on to the tail of execution. */
1435 spin_lock(&dev_priv
->mm
.active_list_lock
);
1436 list_move_tail(&obj_priv
->list
,
1437 &dev_priv
->mm
.active_list
);
1438 spin_unlock(&dev_priv
->mm
.active_list_lock
);
1439 obj_priv
->last_rendering_seqno
= seqno
;
1443 i915_gem_object_move_to_flushing(struct drm_gem_object
*obj
)
1445 struct drm_device
*dev
= obj
->dev
;
1446 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1447 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1449 BUG_ON(!obj_priv
->active
);
1450 list_move_tail(&obj_priv
->list
, &dev_priv
->mm
.flushing_list
);
1451 obj_priv
->last_rendering_seqno
= 0;
1455 i915_gem_object_move_to_inactive(struct drm_gem_object
*obj
)
1457 struct drm_device
*dev
= obj
->dev
;
1458 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1459 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1461 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1462 if (obj_priv
->pin_count
!= 0)
1463 list_del_init(&obj_priv
->list
);
1465 list_move_tail(&obj_priv
->list
, &dev_priv
->mm
.inactive_list
);
1467 obj_priv
->last_rendering_seqno
= 0;
1468 if (obj_priv
->active
) {
1469 obj_priv
->active
= 0;
1470 drm_gem_object_unreference(obj
);
1472 i915_verify_inactive(dev
, __FILE__
, __LINE__
);
1476 * Creates a new sequence number, emitting a write of it to the status page
1477 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1479 * Must be called with struct_lock held.
1481 * Returned sequence numbers are nonzero on success.
1484 i915_add_request(struct drm_device
*dev
, struct drm_file
*file_priv
,
1485 uint32_t flush_domains
)
1487 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1488 struct drm_i915_file_private
*i915_file_priv
= NULL
;
1489 struct drm_i915_gem_request
*request
;
1494 if (file_priv
!= NULL
)
1495 i915_file_priv
= file_priv
->driver_priv
;
1497 request
= drm_calloc(1, sizeof(*request
), DRM_MEM_DRIVER
);
1498 if (request
== NULL
)
1501 /* Grab the seqno we're going to make this request be, and bump the
1502 * next (skipping 0 so it can be the reserved no-seqno value).
1504 seqno
= dev_priv
->mm
.next_gem_seqno
;
1505 dev_priv
->mm
.next_gem_seqno
++;
1506 if (dev_priv
->mm
.next_gem_seqno
== 0)
1507 dev_priv
->mm
.next_gem_seqno
++;
1510 OUT_RING(MI_STORE_DWORD_INDEX
);
1511 OUT_RING(I915_GEM_HWS_INDEX
<< MI_STORE_DWORD_INDEX_SHIFT
);
1514 OUT_RING(MI_USER_INTERRUPT
);
1517 DRM_DEBUG("%d\n", seqno
);
1519 request
->seqno
= seqno
;
1520 request
->emitted_jiffies
= jiffies
;
1521 was_empty
= list_empty(&dev_priv
->mm
.request_list
);
1522 list_add_tail(&request
->list
, &dev_priv
->mm
.request_list
);
1523 if (i915_file_priv
) {
1524 list_add_tail(&request
->client_list
,
1525 &i915_file_priv
->mm
.request_list
);
1527 INIT_LIST_HEAD(&request
->client_list
);
1530 /* Associate any objects on the flushing list matching the write
1531 * domain we're flushing with our flush.
1533 if (flush_domains
!= 0) {
1534 struct drm_i915_gem_object
*obj_priv
, *next
;
1536 list_for_each_entry_safe(obj_priv
, next
,
1537 &dev_priv
->mm
.flushing_list
, list
) {
1538 struct drm_gem_object
*obj
= obj_priv
->obj
;
1540 if ((obj
->write_domain
& flush_domains
) ==
1541 obj
->write_domain
) {
1542 obj
->write_domain
= 0;
1543 i915_gem_object_move_to_active(obj
, seqno
);
1549 if (was_empty
&& !dev_priv
->mm
.suspended
)
1550 schedule_delayed_work(&dev_priv
->mm
.retire_work
, HZ
);
1555 * Command execution barrier
1557 * Ensures that all commands in the ring are finished
1558 * before signalling the CPU
1561 i915_retire_commands(struct drm_device
*dev
)
1563 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1564 uint32_t cmd
= MI_FLUSH
| MI_NO_WRITE_FLUSH
;
1565 uint32_t flush_domains
= 0;
1568 /* The sampler always gets flushed on i965 (sigh) */
1570 flush_domains
|= I915_GEM_DOMAIN_SAMPLER
;
1573 OUT_RING(0); /* noop */
1575 return flush_domains
;
1579 * Moves buffers associated only with the given active seqno from the active
1580 * to inactive list, potentially freeing them.
1583 i915_gem_retire_request(struct drm_device
*dev
,
1584 struct drm_i915_gem_request
*request
)
1586 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1588 /* Move any buffers on the active list that are no longer referenced
1589 * by the ringbuffer to the flushing/inactive lists as appropriate.
1591 spin_lock(&dev_priv
->mm
.active_list_lock
);
1592 while (!list_empty(&dev_priv
->mm
.active_list
)) {
1593 struct drm_gem_object
*obj
;
1594 struct drm_i915_gem_object
*obj_priv
;
1596 obj_priv
= list_first_entry(&dev_priv
->mm
.active_list
,
1597 struct drm_i915_gem_object
,
1599 obj
= obj_priv
->obj
;
1601 /* If the seqno being retired doesn't match the oldest in the
1602 * list, then the oldest in the list must still be newer than
1605 if (obj_priv
->last_rendering_seqno
!= request
->seqno
)
1609 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1610 __func__
, request
->seqno
, obj
);
1613 if (obj
->write_domain
!= 0)
1614 i915_gem_object_move_to_flushing(obj
);
1616 /* Take a reference on the object so it won't be
1617 * freed while the spinlock is held. The list
1618 * protection for this spinlock is safe when breaking
1619 * the lock like this since the next thing we do
1620 * is just get the head of the list again.
1622 drm_gem_object_reference(obj
);
1623 i915_gem_object_move_to_inactive(obj
);
1624 spin_unlock(&dev_priv
->mm
.active_list_lock
);
1625 drm_gem_object_unreference(obj
);
1626 spin_lock(&dev_priv
->mm
.active_list_lock
);
1630 spin_unlock(&dev_priv
->mm
.active_list_lock
);
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
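/* The signed 32-bit subtraction keeps this comparison correct across seqno
 * wraparound, as long as outstanding seqnos never span more than 2^31.
 */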
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
1651 * This function clears the request list as sequence numbers are passed.
1654 i915_gem_retire_requests(struct drm_device
*dev
)
1656 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1659 if (!dev_priv
->hw_status_page
)
1662 seqno
= i915_get_gem_seqno(dev
);
1664 while (!list_empty(&dev_priv
->mm
.request_list
)) {
1665 struct drm_i915_gem_request
*request
;
1666 uint32_t retiring_seqno
;
1668 request
= list_first_entry(&dev_priv
->mm
.request_list
,
1669 struct drm_i915_gem_request
,
1671 retiring_seqno
= request
->seqno
;
1673 if (i915_seqno_passed(seqno
, retiring_seqno
) ||
1674 dev_priv
->mm
.wedged
) {
1675 i915_gem_retire_request(dev
, request
);
1677 list_del(&request
->list
);
1678 list_del(&request
->client_list
);
1679 drm_free(request
, sizeof(*request
), DRM_MEM_DRIVER
);
1686 i915_gem_retire_work_handler(struct work_struct
*work
)
1688 drm_i915_private_t
*dev_priv
;
1689 struct drm_device
*dev
;
1691 dev_priv
= container_of(work
, drm_i915_private_t
,
1692 mm
.retire_work
.work
);
1693 dev
= dev_priv
->dev
;
1695 mutex_lock(&dev
->struct_mutex
);
1696 i915_gem_retire_requests(dev
);
1697 if (!dev_priv
->mm
.suspended
&&
1698 !list_empty(&dev_priv
->mm
.request_list
))
1699 schedule_delayed_work(&dev_priv
->mm
.retire_work
, HZ
);
1700 mutex_unlock(&dev
->struct_mutex
);
1704 * Waits for a sequence number to be signaled, and cleans up the
1705 * request and object lists appropriately for that event.
1708 i915_wait_request(struct drm_device
*dev
, uint32_t seqno
)
1710 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1716 if (!i915_seqno_passed(i915_get_gem_seqno(dev
), seqno
)) {
1718 ier
= I915_READ(DEIER
) | I915_READ(GTIER
);
1720 ier
= I915_READ(IER
);
1722 DRM_ERROR("something (likely vbetool) disabled "
1723 "interrupts, re-enabling\n");
1724 i915_driver_irq_preinstall(dev
);
1725 i915_driver_irq_postinstall(dev
);
1728 dev_priv
->mm
.waiting_gem_seqno
= seqno
;
1729 i915_user_irq_get(dev
);
1730 ret
= wait_event_interruptible(dev_priv
->irq_queue
,
1731 i915_seqno_passed(i915_get_gem_seqno(dev
),
1733 dev_priv
->mm
.wedged
);
1734 i915_user_irq_put(dev
);
1735 dev_priv
->mm
.waiting_gem_seqno
= 0;
1737 if (dev_priv
->mm
.wedged
)
1740 if (ret
&& ret
!= -ERESTARTSYS
)
1741 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1742 __func__
, ret
, seqno
, i915_get_gem_seqno(dev
));
1744 /* Directly dispatch request retiring. While we have the work queue
1745 * to handle this, the waiter on a request often wants an associated
1746 * buffer to have made it to the inactive list, and we would need
1747 * a separate wait queue to handle that.
1750 i915_gem_retire_requests(dev
);
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */
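		/* For example, invalidating the sampler while flushing render
		 * on a pre-965 chip ends up, per the logic below, as
		 * cmd = MI_FLUSH | MI_READ_FLUSH with MI_NO_WRITE_FLUSH cleared.
		 */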
		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}
1827 * Ensures that all rendering to the object has completed and the object is
1828 * safe to unbind from the GTT or access from the CPU.
1831 i915_gem_object_wait_rendering(struct drm_gem_object
*obj
)
1833 struct drm_device
*dev
= obj
->dev
;
1834 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1837 /* This function only exists to support waiting for existing rendering,
1838 * not for emitting required flushes.
1840 BUG_ON((obj
->write_domain
& I915_GEM_GPU_DOMAINS
) != 0);
1842 /* If there is rendering queued on the buffer being evicted, wait for
1845 if (obj_priv
->active
) {
1847 DRM_INFO("%s: object %p wait for seqno %08x\n",
1848 __func__
, obj
, obj_priv
->last_rendering_seqno
);
1850 ret
= i915_wait_request(dev
, obj_priv
->last_rendering_seqno
);
1859 * Unbinds an object from the GTT aperture.
1862 i915_gem_object_unbind(struct drm_gem_object
*obj
)
1864 struct drm_device
*dev
= obj
->dev
;
1865 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
1870 DRM_INFO("%s:%d %p\n", __func__
, __LINE__
, obj
);
1871 DRM_INFO("gtt_space %p\n", obj_priv
->gtt_space
);
1873 if (obj_priv
->gtt_space
== NULL
)
1876 if (obj_priv
->pin_count
!= 0) {
1877 DRM_ERROR("Attempting to unbind pinned buffer\n");
1881 /* Move the object to the CPU domain to ensure that
1882 * any possible CPU writes while it's not in the GTT
1883 * are flushed when we go to remap it. This will
1884 * also ensure that all pending GPU writes are finished
1887 ret
= i915_gem_object_set_to_cpu_domain(obj
, 1);
1889 if (ret
!= -ERESTARTSYS
)
1890 DRM_ERROR("set_domain failed: %d\n", ret
);
1894 if (obj_priv
->agp_mem
!= NULL
) {
1895 drm_unbind_agp(obj_priv
->agp_mem
);
1896 drm_free_agp(obj_priv
->agp_mem
, obj
->size
/ PAGE_SIZE
);
1897 obj_priv
->agp_mem
= NULL
;
1900 BUG_ON(obj_priv
->active
);
1902 /* blow away mappings if mapped through GTT */
1903 offset
= ((loff_t
) obj
->map_list
.hash
.key
) << PAGE_SHIFT
;
1904 if (dev
->dev_mapping
)
1905 unmap_mapping_range(dev
->dev_mapping
, offset
, obj
->size
, 1);
1907 if (obj_priv
->fence_reg
!= I915_FENCE_REG_NONE
)
1908 i915_gem_clear_fence_reg(obj
);
1910 i915_gem_object_put_pages(obj
);
1912 if (obj_priv
->gtt_space
) {
1913 atomic_dec(&dev
->gtt_count
);
1914 atomic_sub(obj
->size
, &dev
->gtt_memory
);
1916 drm_mm_put_block(obj_priv
->gtt_space
);
1917 obj_priv
->gtt_space
= NULL
;
1920 /* Remove ourselves from the LRU list if present. */
1921 if (!list_empty(&obj_priv
->list
))
1922 list_del_init(&obj_priv
->list
);
1928 i915_gem_evict_something(struct drm_device
*dev
)
1930 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
1931 struct drm_gem_object
*obj
;
1932 struct drm_i915_gem_object
*obj_priv
;
1936 /* If there's an inactive buffer available now, grab it
1939 if (!list_empty(&dev_priv
->mm
.inactive_list
)) {
1940 obj_priv
= list_first_entry(&dev_priv
->mm
.inactive_list
,
1941 struct drm_i915_gem_object
,
1943 obj
= obj_priv
->obj
;
1944 BUG_ON(obj_priv
->pin_count
!= 0);
1946 DRM_INFO("%s: evicting %p\n", __func__
, obj
);
1948 BUG_ON(obj_priv
->active
);
1950 /* Wait on the rendering and unbind the buffer. */
1951 ret
= i915_gem_object_unbind(obj
);
1955 /* If we didn't get anything, but the ring is still processing
1956 * things, wait for one of those things to finish and hopefully
1957 * leave us a buffer to evict.
1959 if (!list_empty(&dev_priv
->mm
.request_list
)) {
1960 struct drm_i915_gem_request
*request
;
1962 request
= list_first_entry(&dev_priv
->mm
.request_list
,
1963 struct drm_i915_gem_request
,
1966 ret
= i915_wait_request(dev
, request
->seqno
);
1970 /* if waiting caused an object to become inactive,
1971 * then loop around and wait for it. Otherwise, we
1972 * assume that waiting freed and unbound something,
1973 * so there should now be some space in the GTT
1975 if (!list_empty(&dev_priv
->mm
.inactive_list
))
1980 /* If we didn't have anything on the request list but there
1981 * are buffers awaiting a flush, emit one and try again.
1982 * When we wait on it, those buffers waiting for that flush
1983 * will get moved to inactive.
1985 if (!list_empty(&dev_priv
->mm
.flushing_list
)) {
1986 obj_priv
= list_first_entry(&dev_priv
->mm
.flushing_list
,
1987 struct drm_i915_gem_object
,
1989 obj
= obj_priv
->obj
;
1994 i915_add_request(dev
, NULL
, obj
->write_domain
);
2000 DRM_ERROR("inactive empty %d request empty %d "
2001 "flushing empty %d\n",
2002 list_empty(&dev_priv
->mm
.inactive_list
),
2003 list_empty(&dev_priv
->mm
.request_list
),
2004 list_empty(&dev_priv
->mm
.flushing_list
));
2005 /* If we didn't do any of the above, there's nothing to be done
2006 * and we just can't fit it in.
2014 i915_gem_evict_everything(struct drm_device
*dev
)
2019 ret
= i915_gem_evict_something(dev
);
2029 i915_gem_object_get_pages(struct drm_gem_object
*obj
)
2031 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
2033 struct address_space
*mapping
;
2034 struct inode
*inode
;
2038 if (obj_priv
->pages_refcount
++ != 0)
2041 /* Get the list of pages out of our struct file. They'll be pinned
2042 * at this point until we release them.
2044 page_count
= obj
->size
/ PAGE_SIZE
;
2045 BUG_ON(obj_priv
->pages
!= NULL
);
2046 obj_priv
->pages
= drm_calloc_large(page_count
, sizeof(struct page
*));
2047 if (obj_priv
->pages
== NULL
) {
2048 DRM_ERROR("Faled to allocate page list\n");
2049 obj_priv
->pages_refcount
--;
2053 inode
= obj
->filp
->f_path
.dentry
->d_inode
;
2054 mapping
= inode
->i_mapping
;
2055 for (i
= 0; i
< page_count
; i
++) {
2056 page
= read_mapping_page(mapping
, i
, NULL
);
2058 ret
= PTR_ERR(page
);
2059 DRM_ERROR("read_mapping_page failed: %d\n", ret
);
2060 i915_gem_object_put_pages(obj
);
2063 obj_priv
->pages
[i
] = page
;
2066 if (obj_priv
->tiling_mode
!= I915_TILING_NONE
)
2067 i915_gem_object_do_bit_17_swizzle(obj
);
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}
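/* In the 965 fence layout programmed above, the low dword holds the fence
 * start address, the high dword holds the address of the last page covered
 * (hence the "+ obj->size - 4096"), and the pitch field is expressed in
 * units of 128-byte tile rows.
 */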
2092 static void i915_write_fence_reg(struct drm_i915_fence_reg
*reg
)
2094 struct drm_gem_object
*obj
= reg
->obj
;
2095 struct drm_device
*dev
= obj
->dev
;
2096 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2097 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
2098 int regnum
= obj_priv
->fence_reg
;
2100 uint32_t fence_reg
, val
;
2103 if ((obj_priv
->gtt_offset
& ~I915_FENCE_START_MASK
) ||
2104 (obj_priv
->gtt_offset
& (obj
->size
- 1))) {
2105 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
2106 __func__
, obj_priv
->gtt_offset
, obj
->size
);
2110 if (obj_priv
->tiling_mode
== I915_TILING_Y
&&
2111 HAS_128_BYTE_Y_TILING(dev
))
2116 /* Note: pitch better be a power of two tile widths */
2117 pitch_val
= obj_priv
->stride
/ tile_width
;
2118 pitch_val
= ffs(pitch_val
) - 1;
2120 val
= obj_priv
->gtt_offset
;
2121 if (obj_priv
->tiling_mode
== I915_TILING_Y
)
2122 val
|= 1 << I830_FENCE_TILING_Y_SHIFT
;
2123 val
|= I915_FENCE_SIZE_BITS(obj
->size
);
2124 val
|= pitch_val
<< I830_FENCE_PITCH_SHIFT
;
2125 val
|= I830_FENCE_REG_VALID
;
2128 fence_reg
= FENCE_REG_830_0
+ (regnum
* 4);
2130 fence_reg
= FENCE_REG_945_8
+ ((regnum
- 8) * 4);
2131 I915_WRITE(fence_reg
, val
);
2134 static void i830_write_fence_reg(struct drm_i915_fence_reg
*reg
)
2136 struct drm_gem_object
*obj
= reg
->obj
;
2137 struct drm_device
*dev
= obj
->dev
;
2138 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
2139 struct drm_i915_gem_object
*obj_priv
= obj
->driver_private
;
2140 int regnum
= obj_priv
->fence_reg
;
2143 uint32_t fence_size_bits
;
2145 if ((obj_priv
->gtt_offset
& ~I830_FENCE_START_MASK
) ||
2146 (obj_priv
->gtt_offset
& (obj
->size
- 1))) {
2147 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2148 __func__
, obj_priv
->gtt_offset
);
2152 pitch_val
= obj_priv
->stride
/ 128;
2153 pitch_val
= ffs(pitch_val
) - 1;
2154 WARN_ON(pitch_val
> I830_FENCE_MAX_PITCH_VAL
);
2156 val
= obj_priv
->gtt_offset
;
2157 if (obj_priv
->tiling_mode
== I915_TILING_Y
)
2158 val
|= 1 << I830_FENCE_TILING_Y_SHIFT
;
2159 fence_size_bits
= I830_FENCE_SIZE_BITS(obj
->size
);
2160 WARN_ON(fence_size_bits
& ~0x00000f00);
2161 val
|= fence_size_bits
;
2162 val
|= pitch_val
<< I830_FENCE_PITCH_SHIFT
;
2163 val
|= I830_FENCE_REG_VALID
;
2165 I915_WRITE(FENCE_REG_830_0
+ (regnum
* 4), val
);
/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 * @write: object is about to be written
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
static int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *old_obj_priv = NULL;
	int i, ret;

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	/* First try to find a free reg */
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			break;

		old_obj_priv = reg->obj->driver_private;
		if (!old_obj_priv->pin_count)
			break;
	}

	/* None available, try to steal one or wait for a user to finish */
	if (i == dev_priv->num_fence_regs) {
		uint32_t seqno = dev_priv->mm.next_gem_seqno;
		loff_t offset;

try_again:
		for (i = dev_priv->fence_reg_start;
		     i < dev_priv->num_fence_regs; i++) {
			uint32_t this_seqno;

			reg = &dev_priv->fence_regs[i];
			old_obj_priv = reg->obj->driver_private;

			if (old_obj_priv->pin_count)
				continue;

			/* i915 uses fences for GPU access to tiled buffers */
			if (IS_I965G(dev) || !old_obj_priv->active)
				break;

			/* find the seqno of the first available fence */
			this_seqno = old_obj_priv->last_rendering_seqno;
			if (this_seqno != 0 &&
			    reg->obj->write_domain == 0 &&
			    i915_seqno_passed(seqno, this_seqno))
				seqno = this_seqno;
		}

		/*
		 * Now things get ugly... we have to wait for one of the
		 * objects to finish before trying again.
		 */
		if (i == dev_priv->num_fence_regs) {
			if (seqno == dev_priv->mm.next_gem_seqno) {
				i915_gem_flush(dev,
					       I915_GEM_GPU_DOMAINS,
					       I915_GEM_GPU_DOMAINS);
				seqno = i915_add_request(dev, NULL,
							 I915_GEM_GPU_DOMAINS);
				if (seqno == 0)
					return -ENOMEM;
			}

			ret = i915_wait_request(dev, seqno);
			if (ret)
				return ret;
			goto try_again;
		}

		/*
		 * Zap this virtual mapping so we can set up a fence again
		 * for this object next time we need it.
		 */
		offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
		if (dev->dev_mapping)
			unmap_mapping_range(dev->dev_mapping, offset,
					    reg->obj->size, 1);
		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
	}

	obj_priv->fence_reg = i;
	reg->obj = obj;

	if (IS_I965G(dev))
		i965_write_fence_reg(reg);
	else if (IS_I9XX(dev))
		i915_write_fence_reg(reg);
	else
		i830_write_fence_reg(reg);

	return 0;
}
/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (IS_I965G(dev))
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
	else {
		uint32_t fence_reg;

		if (obj_priv->fence_reg < 8)
			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
		else
			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
						       8) * 4;

		I915_WRITE(fence_reg, 0);
	}

	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (dev_priv->mm.suspended)
		return -EBUSY;
	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		bool lists_empty;

		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		spin_lock(&dev_priv->mm.active_list_lock);
		lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
			       list_empty(&dev_priv->mm.flushing_list) &&
			       list_empty(&dev_priv->mm.active_list));
		spin_unlock(&dev_priv->mm.active_list_lock);
		if (lists_empty) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %d at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
					       page_count,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	return 0;
}
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->pages == NULL)
		return;

	/* XXX: The 865 in particular appears to be weird in how it handles
	 * cache flushing.  We haven't figured it out, but the
	 * clflush+agp_chipset_flush doesn't appear to successfully get the
	 * data visible to the GPU, while wbinvd + agp_chipset_flush does.
	 */
	if (IS_I865G(obj->dev)) {
		wbinvd();
		return;
	}

	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t seqno;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, NULL, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	obj->write_domain = 0;
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	obj->write_domain = 0;
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}
/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
	struct drm_device		*dev = obj->dev;
	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
	uint32_t			invalidate_domains = 0;
	uint32_t			flush_domains = 0;

	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, obj->pending_read_domains,
		 obj->write_domain, obj->pending_write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |=
			obj->pending_read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		i915_gem_clflush_object(obj);
	}

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;
	obj->read_domains = obj->pending_read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif
}
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
		 DRM_MEM_DRIVER);
	obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
						      DRM_MEM_DRIVER);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->pages + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	return 0;
}
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry,
				 struct drm_i915_gem_relocation_entry *relocs)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc->target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc->target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->read_domains,
				  reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->write_domain && target_obj->pending_write_domain &&
		    reloc->write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc->offset,
			 (int) reloc->target_handle,
			 (int) reloc->read_domains,
			 (int) reloc->write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc->presumed_offset,
			 reloc->delta);
#endif

		target_obj->pending_read_domains |= reloc->read_domains;
		target_obj->pending_write_domain |= reloc->write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc->offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc->delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc->offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* The updated presumed offset for this entry will be
		 * copied back out to the user.
		 */
		reloc->presumed_offset = target_obj_priv->gtt_offset;

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     struct drm_clip_rect *cliprects,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&i915_file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);

		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ret = i915_wait_request(dev, request->seqno);
		if (ret != 0)
			break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int
i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
			      uint32_t buffer_count,
			      struct drm_i915_gem_relocation_entry **relocs)
{
	uint32_t reloc_count = 0, reloc_index = 0, i;
	int ret;

	*relocs = NULL;
	for (i = 0; i < buffer_count; i++) {
		if (reloc_count + exec_list[i].relocation_count < reloc_count)
			return -EINVAL;
		reloc_count += exec_list[i].relocation_count;
	}

	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
	if (*relocs == NULL)
		return -ENOMEM;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		ret = copy_from_user(&(*relocs)[reloc_index],
				     user_relocs,
				     exec_list[i].relocation_count *
				     sizeof(**relocs));
		if (ret != 0) {
			drm_free_large(*relocs);
			*relocs = NULL;
			return -EFAULT;
		}

		reloc_index += exec_list[i].relocation_count;
	}

	return 0;
}
static int
i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
			    uint32_t buffer_count,
			    struct drm_i915_gem_relocation_entry *relocs)
{
	uint32_t reloc_count = 0, i;
	int ret = 0;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		int unwritten;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		unwritten = copy_to_user(user_relocs,
					 &relocs[reloc_count],
					 exec_list[i].relocation_count *
					 sizeof(*relocs));

		if (unwritten) {
			ret = -EFAULT;
			goto err;
		}

		reloc_count += exec_list[i].relocation_count;
	}

err:
	drm_free_large(relocs);

	return ret;
}
static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
			  uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7)
		return -EINVAL;

	return 0;
}
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_relocation_entry *relocs;
	int ret, ret2, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains, reloc_index;
	int pin_tries;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	if (args->num_cliprects != 0) {
		cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
				       DRM_MEM_DRIVER);
		if (cliprects == NULL)
			goto pre_mutex_err;

		ret = copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)
				     (uintptr_t) args->cliprects_ptr,
				     sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0) {
			DRM_ERROR("copy %d cliprects failed: %d\n",
				  args->num_cliprects, ret);
			goto pre_mutex_err;
		}
	}

	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
					    &relocs);
	if (ret != 0)
		goto pre_mutex_err;

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}

		obj_priv = object_list[i]->driver_private;
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				  object_list[i]);
			ret = -EBADF;
			goto err;
		}
		obj_priv->in_execbuffer = true;
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		reloc_index = 0;

		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i],
							       &relocs[reloc_index]);
			if (ret)
				break;
			pinned = i + 1;
			reloc_index += exec_list[i].relocation_count;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOMEM || pin_tries >= 1) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to pin buffers %d\n", ret);
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	if (batch_obj->pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* Sanity check the batch buffer, prior to moving objects */
	exec_offset = exec_list[args->buffer_count - 1].offset;
	ret = i915_gem_check_execbuffer(args, exec_offset);
	if (ret != 0) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		goto err;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, file_priv,
					       dev->flush_domains);
	}

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		obj->write_domain = obj->pending_write_domain;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

#if WATCH_EXEC
	i915_gem_dump_object(batch_obj,
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, file_priv, flush_domains);

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

err:
	for (i = 0; i < pinned; i++)
		i915_gem_object_unpin(object_list[i]);

	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]) {
			obj_priv = object_list[i]->driver_private;
			obj_priv->in_execbuffer = false;
		}
		drm_gem_object_unreference(object_list[i]);
	}

	mutex_unlock(&dev->struct_mutex);

	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret)
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
	}

	/* Copy the updated relocations out regardless of current error
	 * state.  Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
	 * state that didn't match the actual object state.
	 */
	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
					   relocs);
	if (ret2 != 0) {
		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);

		if (ret == 0)
			ret = ret2;
	}

pre_mutex_err:
	drm_free_large(object_list);
	drm_free_large(exec_list);
	drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
		 DRM_MEM_DRIVER);

	return ret;
}
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to bind: %d\n", ret);
			return ret;
		}
	}
	/*
	 * Pre-965 chips need a fence register set up in order to
	 * properly handle tiled surfaces.
	 */
	if (!IS_I965G(dev) &&
	    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, true);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to install fence: %d\n",
					  ret);
			return ret;
		}
	}
	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	/* Update the active list for the hardware's current position.
	 * Otherwise this only updates on a delayed timer or when irqs are
	 * actually unmasked, and our working set ends up being larger than
	 * required.
	 */
	i915_gem_retire_requests(dev);

	obj_priv = obj->driver_private;
	/* Don't count being on the flushing list against the object being
	 * done.  Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache.  The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj_priv->list);

	return 0;
}

void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_object_unbind(obj);

	i915_gem_free_mmap_offset(obj);

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	kfree(obj_priv->bit_17);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	spin_lock(&dev_priv->mm.active_list_lock);
	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive.  If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there.  So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}
	spin_unlock(&dev_priv->mm.active_list_lock);

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
static void
i915_gem_cleanup_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	if (dev_priv->hws_obj == NULL)
		return;

	obj = dev_priv->hws_obj;
	obj_priv = obj->driver_private;

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->hws_obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
	dev_priv->hw_status_page = NULL;

	/* Write high address into HWS_PGA when disabling. */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	drm_i915_ring_buffer_t *ring = &dev_priv->ring;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		i915_gem_cleanup_hws(dev);
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	ring->Size = obj->size;
	ring->tail_mask = obj->size - 1;

	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.size = obj->size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return -EINVAL;
	}
	ring->ring_obj = obj;
	ring->virtual_start = ring->map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
	}

	return 0;
}
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	i915_gem_cleanup_hws(dev);
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	spin_lock(&dev_priv->mm.active_list_lock);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	return ret;
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock_init(&dev_priv->mm.active_list_lock);
	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	i915_gem_detect_bit_6_swizzle(dev);
}
/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
int i915_gem_init_phys_object(struct drm_device *dev,
			      int id, int size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
	return ret;
}

void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = obj->driver_private;
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}

int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = obj->driver_private;

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	return 0;
out:
	return ret;
}
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}