/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>
35 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
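/*
 * For illustration (not part of the original source): with the domain bits
 * defined in i915_drm.h (CPU = 0x1, GTT = 0x40, the GPU caches in between),
 * this mask selects every domain that lives on the GPU side, so a test like
 *
 *	if (obj->write_domain & I915_GEM_GPU_DOMAINS)
 *
 * is true for the RENDER, SAMPLER, COMMAND, INSTRUCTION and VERTEX domains,
 * but false for CPU and GTT.
 */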
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_pages(struct drm_gem_object *obj);
static void i915_gem_object_put_pages(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	uint32_t handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
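/*
 * Rough userspace usage sketch (illustrative, not from this file): the
 * handle returned here is what the pread/pwrite/mmap ioctls below take.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	// create.handle now names the new object
 */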
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int ret;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	return ret;
}
static int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
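/*
 * Worked example of the per-page split above (illustrative): with 4 KiB
 * pages, a read at offset 0x1f00 of length 0x300 gives page_base 0x1000,
 * page_offset 0xf00 and page_length 0x100 on the first iteration (clamped
 * at the page boundary), leaving 0x200 bytes for the next page.
 */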
/**
 * This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user into outside of the struct_mutex, so we
 * can copy out of the object's backing pages while holding the struct mutex
 * and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_shmem_copy(user_pages[data_page_index],
				      data_page_offset,
				      obj_priv->pages[shmem_page_index],
				      shmem_page_offset,
				      page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	kfree(user_pages);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
	if (ret != 0)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);

	drm_gem_object_unreference(obj);

	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}
/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	kfree(user_pages);

	return ret;
}
/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
				      shmem_page_offset,
				      user_pages[data_page_index],
				      data_page_offset,
				      page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	kfree(user_pages);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}
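/*
 * Rough userspace usage sketch (illustrative, not from this file): pwrite
 * is the usual path for uploading buffer contents.
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(batch),
 *		.data_ptr = (uintptr_t)batch,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */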
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
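/*
 * Illustrative call from userspace before writing through a CPU mapping
 * (assumed typical usage, not taken from this file):
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */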
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
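/*
 * Illustrative userspace counterpart (assumed): the returned addr_ptr is a
 * normal CPU pointer into the object's shmem backing store.
 *
 *	struct drm_i915_gem_mmap m = { .handle = handle, .size = size };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &m);
 *	void *ptr = (void *)(uintptr_t)m.addr_ptr;
 */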
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	/* Need a new fence register? */
	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, write);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	case -EFAULT:
		return VM_FAULT_SIGBUS;
	default:
		return VM_FAULT_NOPAGE;
	}
}
/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = drm_calloc(1, sizeof(struct drm_map_list),
			       DRM_MEM_DRIVER);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		ret = -ENOMEM;
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);

	return ret;
}
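/*
 * Worked example (illustrative): if the offset manager hands back a node
 * starting at page 0x10, the hash key is 0x10 and userspace is told to
 * mmap at fake offset 0x10 << PAGE_SHIFT = 0x10000; drm_gem_mmap() then
 * hashes that offset back to this object when the mapping is created.
 */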
static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
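/*
 * Worked example (illustrative): on a pre-965 i9xx part, start is 1 MiB,
 * so any tiled object up to 1 MiB needs 1 MiB alignment; a 3 MiB tiled
 * object walks 1M -> 2M -> 4M and returns 4 MiB, the smallest power-of-two
 * fence size that covers it.
 */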
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);

	/* Make sure the alignment is correct for fence regs etc */
	if (obj_priv->agp_mem &&
	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
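/*
 * Illustrative userspace flow (assumed): fetch the fake offset, then mmap
 * it through the DRM device node; faults land in i915_gem_fault() above.
 *
 *	struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mg.offset);
 */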
static void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	BUG_ON(obj_priv->pages_refcount == 0);

	if (--obj_priv->pages_refcount != 0)
		return;

	for (i = 0; i < page_count; i++)
		if (obj_priv->pages[i] != NULL) {
			if (obj_priv->dirty)
				set_page_dirty(obj_priv->pages[i]);
			mark_page_accessed(obj_priv->pages[i]);
			page_cache_release(obj_priv->pages[i]);
		}
	obj_priv->dirty = 0;

	drm_free(obj_priv->pages,
		 page_count * sizeof(struct page *),
		 DRM_MEM_DRIVER);
	obj_priv->pages = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	obj_priv->last_rendering_seqno = seqno;
}
static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}
	}

	if (was_empty && !dev_priv->mm.suspended)
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	return seqno;
}
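/*
 * Illustrative wraparound case (not from the original source): if
 * next_gem_seqno is 0xffffffff, this request is emitted as 0xffffffff and
 * the post-increment wraps the counter to 0, which the extra bump above
 * turns into 1 so that 0 stays reserved as the "no seqno" value.
 */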
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this one.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}
}
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
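/*
 * Worked example (illustrative): the signed-difference trick survives
 * wraparound.  With seq1 = 2 and seq2 = 0xfffffffe, seq1 - seq2 = 4, so
 * (int32_t)4 >= 0 and seq1 is correctly treated as later even though it
 * is numerically smaller.  It only breaks if two live seqnos ever differ
 * by 2^31 or more.
 */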
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!dev_priv->hw_status_page)
		return;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
			break;
	}
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
						     I915_GEM_DOMAIN_GTT)) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	loff_t offset;
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	BUG_ON(obj_priv->active);

	/* blow away mappings if mapped through GTT */
	offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);

	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	i915_gem_object_put_pages(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);
#if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				break;

			/* if waiting caused an object to become inactive,
			 * then loop around and wait for it. Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, obj->write_domain);

			obj = NULL;
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d "
			  "flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}
	return ret;
}
static int
i915_gem_evict_everything(struct drm_device *dev)
{
	int ret;

	for (;;) {
		ret = i915_gem_evict_something(dev);
		if (ret != 0)
			break;
	}
	if (ret == -ENOMEM)
		return 0;
	return ret;
}
static int
i915_gem_object_get_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->pages_refcount++ != 0)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
				     DRM_MEM_DRIVER);
	if (obj_priv->pages == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_put_pages(obj);
			return ret;
		}
		obj_priv->pages[i] = page;
	}
	return 0;
}
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}
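/*
 * Worked example (illustrative): for an object at gtt_offset 0x00100000,
 * size 0x00100000 and stride 512 with Y tiling, the register encodes the
 * last covered page (0x001ff000) in the high dword, the start (0x00100000)
 * in the low dword, the pitch field (512 / 128) - 1 = 3, the Y-tiling bit
 * and the valid bit.
 */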
static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (regnum < 8)
		fence_reg = FENCE_REG_830_0 + (regnum * 4);
	else
		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
	I915_WRITE(fence_reg, val);
}
static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = (obj_priv->stride / 128) - 1;

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I830_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 * @write: object is about to be written
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
static int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *old_obj_priv = NULL;
	int i, ret;

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	/* First try to find a free reg */
try_again:
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			break;

		old_obj_priv = reg->obj->driver_private;
		if (!old_obj_priv->pin_count)
			break;
	}

	/* None available, try to steal one or wait for a user to finish */
	if (i == dev_priv->num_fence_regs) {
		uint32_t seqno = dev_priv->mm.next_gem_seqno;
		loff_t offset;

		for (i = dev_priv->fence_reg_start;
		     i < dev_priv->num_fence_regs; i++) {
			uint32_t this_seqno;

			reg = &dev_priv->fence_regs[i];
			old_obj_priv = reg->obj->driver_private;

			if (old_obj_priv->pin_count)
				continue;

			/* i915 uses fences for GPU access to tiled buffers */
			if (IS_I965G(dev) || !old_obj_priv->active)
				break;

			/* find the seqno of the first available fence */
			this_seqno = old_obj_priv->last_rendering_seqno;
			if (this_seqno != 0 &&
			    reg->obj->write_domain == 0 &&
			    i915_seqno_passed(seqno, this_seqno))
				seqno = this_seqno;
		}

		/*
		 * Now things get ugly... we have to wait for one of the
		 * objects to finish before trying again.
		 */
		if (i == dev_priv->num_fence_regs) {
			if (seqno == dev_priv->mm.next_gem_seqno) {
				i915_gem_flush(dev,
					       I915_GEM_GPU_DOMAINS,
					       I915_GEM_GPU_DOMAINS);
				seqno = i915_add_request(dev,
							 I915_GEM_GPU_DOMAINS);
				if (seqno == 0)
					return -ENOMEM;
			}

			ret = i915_wait_request(dev, seqno);
			if (ret)
				return ret;
			goto try_again;
		}

		BUG_ON(old_obj_priv->active ||
		       (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));

		/*
		 * Zap this virtual mapping so we can set up a fence again
		 * for this object next time we need it.
		 */
		offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
		if (dev->dev_mapping)
			unmap_mapping_range(dev->dev_mapping, offset,
					    reg->obj->size, 1);
		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
	}

	obj_priv->fence_reg = i;
	reg->obj = obj;

	if (IS_I965G(dev))
		i965_write_fence_reg(reg);
	else if (IS_I9XX(dev))
		i915_write_fence_reg(reg);
	else
		i830_write_fence_reg(reg);

	return 0;
}
/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (IS_I965G(dev))
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
	else {
		uint32_t fence_reg;

		if (obj_priv->fence_reg < 8)
			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
		else
			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
						       8) * 4;

		I915_WRITE(fence_reg, 0);
	}

	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (dev_priv->mm.suspended)
		return -EBUSY;
	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (PAGE_SIZE - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		if (list_empty(&dev_priv->mm.inactive_list) &&
		    list_empty(&dev_priv->mm.flushing_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %zd at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
					       page_count,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));

	return 0;
}
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->pages == NULL)
		return;

	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t seqno;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 */
	obj->write_domain = 0;
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	obj->write_domain = 0;
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	return 0;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}
/**
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;

	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, obj->pending_read_domains,
		 obj->write_domain, obj->pending_write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |=
			obj->pending_read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		i915_gem_clflush_object(obj);
	}

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains). So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;
	obj->read_domains = obj->pending_read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif
}
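
/*
 * Illustrative walk-through of the mask arithmetic in
 * i915_gem_object_set_to_gpu_domain() above (comment only, not driver
 * code), using Case 3 step 3 from the big comment: an object sitting at
 * (CPU, CPU) asked to move to pending (RENDER, 0).
 *
 *	write_domain         = CPU
 *	pending_read_domains = RENDER
 *
 *	write_domain != pending_read_domains, so
 *		flush_domains      |= CPU
 *		invalidate_domains |= RENDER & ~CPU = RENDER
 *	invalidate_domains |= RENDER & ~CPU = RENDER	(new read domain)
 *
 * (flush_domains | invalidate_domains) includes CPU, so the object gets
 * clflushed here, and execbuffer later emits one accumulated MI_FLUSH
 * covering the RENDER invalidate -- exactly the sequence the comment
 * describes.
 */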
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
		 DRM_MEM_DRIVER);
	obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid. The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
						      DRM_MEM_DRIVER);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->pages + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	return 0;
}
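
/*
 * Illustrative note (comment only): the flush loop above touches the
 * inclusive page range [offset / PAGE_SIZE, (offset + size - 1) / PAGE_SIZE].
 * With 4 KiB pages, a 100-byte read at offset 0x0ff0 spans
 *
 *	first = 0x0ff0 / 0x1000             = page 0
 *	last  = (0x0ff0 + 100 - 1) / 0x1000 = page 1
 *
 * so both pages straddled by the range get clflushed and marked valid,
 * even though neither page is fully covered by the request.
 */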
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry,
				 struct drm_i915_gem_relocation_entry *relocs)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc->target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc->target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->read_domains,
				  reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->write_domain && target_obj->pending_write_domain &&
		    reloc->write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc->offset,
			 (int) reloc->target_handle,
			 (int) reloc->read_domains,
			 (int) reloc->write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc->presumed_offset,
			 reloc->delta);
#endif

		target_obj->pending_read_domains |= reloc->read_domains;
		target_obj->pending_write_domain |= reloc->write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc->offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc->delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc->offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* The updated presumed offset for this entry will be
		 * copied back out to the user.
		 */
		reloc->presumed_offset = target_obj_priv->gtt_offset;

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}
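
/*
 * Illustrative sketch (comment only): what one relocation does.  If the
 * batch refers to a target buffer userspace presumed at 0x00100000 with
 * delta 0x40, but the target actually got bound at 0x00200000, the loop
 * above rewrites the 32-bit word at (batch gtt_offset + reloc->offset):
 *
 *	reloc_val = target gtt_offset + delta
 *	          = 0x00200000 + 0x40 = 0x00200040
 *
 * and records presumed_offset = 0x00200000, so the next execbuffer can
 * take the early-continue path and skip the GTT write when nothing has
 * moved.
 */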
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     struct drm_clip_rect *cliprects,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}
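
/*
 * Illustrative note (comment only): OR-ing start and length folds both
 * 8-byte alignment checks in the dispatch above into a single test:
 *
 *	exec_start = 0x00010008, exec_len = 0x1ff4
 *	(0x00010008 | 0x1ff4) & 0x7 = 0x4   -> rejected
 *
 * either operand having any of its low three bits set makes the OR
 * nonzero, so one branch covers both values.
 */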
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	uint32_t seqno;

	mutex_lock(&dev->struct_mutex);
	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
	i915_file_priv->mm.last_gem_throttle_seqno =
		i915_file_priv->mm.last_gem_seqno;
	if (seqno)
		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static int
i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
			      uint32_t buffer_count,
			      struct drm_i915_gem_relocation_entry **relocs)
{
	uint32_t reloc_count = 0, reloc_index = 0, i;
	int ret;

	*relocs = NULL;
	for (i = 0; i < buffer_count; i++) {
		if (reloc_count + exec_list[i].relocation_count < reloc_count)
			return -EINVAL;
		reloc_count += exec_list[i].relocation_count;
	}

	*relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
	if (*relocs == NULL)
		return -ENOMEM;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		ret = copy_from_user(&(*relocs)[reloc_index],
				     user_relocs,
				     exec_list[i].relocation_count *
				     sizeof(**relocs));
		if (ret != 0) {
			drm_free(*relocs, reloc_count * sizeof(**relocs),
				 DRM_MEM_DRIVER);
			*relocs = NULL;
			return ret;
		}

		reloc_index += exec_list[i].relocation_count;
	}

	return 0;
}
static int
i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
			    uint32_t buffer_count,
			    struct drm_i915_gem_relocation_entry *relocs)
{
	uint32_t reloc_count = 0, i;
	int ret = 0;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		if (ret == 0) {
			ret = copy_to_user(user_relocs,
					   &relocs[reloc_count],
					   exec_list[i].relocation_count *
					   sizeof(*relocs));
		}

		reloc_count += exec_list[i].relocation_count;
	}

	drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);

	return ret;
}
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_relocation_entry *relocs;
	int ret, ret2, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains, reloc_index;
	int pin_tries;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
			       DRM_MEM_DRIVER);
	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
				 DRM_MEM_DRIVER);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	if (args->num_cliprects != 0) {
		cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
				       DRM_MEM_DRIVER);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		ret = copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)
				     (uintptr_t) args->cliprects_ptr,
				     sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0) {
			DRM_ERROR("copy %d cliprects failed: %d\n",
				  args->num_cliprects, ret);
			goto pre_mutex_err;
		}
	}

	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
					    &relocs);
	if (ret != 0)
		goto pre_mutex_err;

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				   exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}

		obj_priv = object_list[i]->driver_private;
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				   object_list[i]);
			ret = -EBADF;
			goto err;
		}
		obj_priv->in_execbuffer = true;
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		reloc_index = 0;

		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i],
							       &relocs[reloc_index]);
			if (ret)
				break;
			pinned = i + 1;
			reloc_index += exec_list[i].relocation_count;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOMEM || pin_tries >= 1) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to pin buffers %d\n", ret);
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, dev->flush_domains);
	}

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		obj->write_domain = obj->pending_write_domain;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

	exec_offset = exec_list[args->buffer_count - 1].offset;

#if WATCH_EXEC
	i915_gem_dump_object(object_list[args->buffer_count - 1],
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on. We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, flush_domains);
	BUG_ON(seqno == 0);
	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

err:
	for (i = 0; i < pinned; i++)
		i915_gem_object_unpin(object_list[i]);

	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]) {
			obj_priv = object_list[i]->driver_private;
			obj_priv->in_execbuffer = false;
		}
		drm_gem_object_unreference(object_list[i]);
	}

	mutex_unlock(&dev->struct_mutex);

	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret)
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
	}

	/* Copy the updated relocations out regardless of current error
	 * state. Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
	 * state that didn't match the actual object state.
	 */
	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
					   relocs);
	if (ret2 != 0) {
		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);

		if (ret == 0)
			ret = ret2;
	}

pre_mutex_err:
	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
		 DRM_MEM_DRIVER);
	drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
		 DRM_MEM_DRIVER);

	return ret;
}
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to bind: %d\n", ret);
			return ret;
		}
	}
	/*
	 * Pre-965 chips need a fence register set up in order to
	 * properly handle tiled surfaces.
	 */
	if (!IS_I965G(dev) &&
	    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, true);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to install fence: %d\n",
					  ret);
			return ret;
		}
	}
	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
					   I915_GEM_DOMAIN_GTT)) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	/* Update the active list for the hardware's current position.
	 * Otherwise this only updates on a delayed timer or when irqs are
	 * actually unmasked, and our working set ends up being larger than
	 * required.
	 */
	i915_gem_retire_requests(dev);

	obj_priv = obj->driver_private;
	/* Don't count being on the flushing list against the object being
	 * done. Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache. The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj_priv->list);

	return 0;
}
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_object_unbind(obj);

	i915_gem_free_mmap_offset(obj);

	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}
static int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack! Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
	seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	while (1) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive. If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there. So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
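
/*
 * Illustrative sketch (not driver code): the wrap-safe seqno comparison
 * the wait loop above relies on.  Casting the unsigned difference to a
 * signed type makes "passed" survive 32-bit wraparound:
 *
 *	static bool seqno_passed(uint32_t seq, uint32_t ref)
 *	{
 *		return (int32_t)(seq - ref) >= 0;
 *	}
 *
 * e.g. seq = 0x00000002, ref = 0xfffffffe: seq - ref = 4, so the newer
 * seqno still counts as having passed the older one.  The helper here is
 * hypothetical; i915_seqno_passed() itself is defined elsewhere in the
 * driver.
 */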
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
static void
i915_gem_cleanup_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	if (dev_priv->hws_obj == NULL)
		return;

	obj = dev_priv->hws_obj;
	obj_priv = obj->driver_private;

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->hws_obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
	dev_priv->hw_status_page = NULL;

	/* Write high address into HWS_PGA when disabling. */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	drm_i915_ring_buffer_t *ring = &dev_priv->ring;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		i915_gem_cleanup_hws(dev);
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	ring->Size = obj->size;
	ring->tail_mask = obj->size - 1;

	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.size = obj->size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return -EINVAL;
	}
	ring->ring_obj = obj;
	ring->virtual_start = ring->map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	return 0;
}
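
/*
 * Illustrative note (comment only): free space in the ring is measured
 * from tail forward to head, minus an 8-byte guard so the tail never
 * catches the head.  With the 128 KiB (0x20000) ring above, head = 0x100
 * and tail = 0x1f000 give
 *
 *	space = 0x100 - (0x1f000 + 8) = -0x1ef08   -> wrapped, so
 *	space += 0x20000               =  0x10f8 bytes free
 *
 * the same arithmetic the tail of i915_gem_init_ringbuffer() performs.
 */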
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	i915_gem_cleanup_hws(dev);
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	return ret;
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	i915_gem_detect_bit_6_swizzle(dev);
}
/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
int i915_gem_init_phys_object(struct drm_device *dev,
			      int id, int size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
	return ret;
}
void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = obj->driver_private;
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = obj->driver_private;

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	return 0;
out:
	return ret;
}
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}