drm/i915: add i915_lp_ring_sync helper
drivers/gpu/drm/i915/i915_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
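/* I915_GEM_GPU_DOMAINS is simply the complement of the two CPU-visible
 * domains, i.e. every on-GPU cache domain (render, sampler, command,
 * instruction, vertex); see the flush logic in i915_gem_flush() below.
 */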
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						     uint64_t offset,
						     uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}
static inline int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}
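/* Note that KM_USER0 and KM_USER1 are distinct atomic-kmap slots, which is
 * what lets slow_shmem_copy() keep the source and destination pages mapped
 * at the same time without the second kmap_atomic() clobbering the first.
 */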
static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}
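/* Worked example of the swizzle above: the hardware XORs address bit 6 with
 * bit 17 of the physical address, so on an affected page the CPU's view of
 * byte X lives at byte X ^ 64.  gpu_offset ^ 64 flips exactly that bit, and
 * clamping each memcpy at ALIGN(gpu_offset + 1, 64) keeps every chunk inside
 * a single 64-byte cacheline so the XOR is constant for the whole chunk:
 * offsets 0..63 map to 64..127, 64..127 map back to 0..63, 128..191 map to
 * 192..255, and so on.
 */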
/**
 * This is the fast shmem pread path, which attempts to copy directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within the shmem file
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static inline gfp_t
i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
{
	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
}

static inline void
i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
{
	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
}

static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;
		gfp_t gfp;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		gfp = i915_gem_object_get_page_gfp_mask(obj);
		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
		ret = i915_gem_object_get_pages(obj);
		i915_gem_object_set_page_gfp_mask (obj, gfp);
	}

	return ret;
}
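/* The gfp dance above is deliberate: clearing __GFP_NORETRY from the
 * mapping's gfp mask for the second i915_gem_object_get_pages() attempt
 * lets the page allocator try much harder (we just evicted buffers to make
 * room), while restoring the mask afterwards keeps routine allocations
 * failing fast under memory pressure.
 */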
/**
 * This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user into outside of the struct_mutex, so we
 * can copy out of the object's backing pages while holding the struct mutex
 * and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference(obj);

	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}

static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
				       page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Since this is already the
		 * slow path, just return the error.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within the shmem file
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    0);
		} else {
			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	mutex_lock(&dev->struct_mutex);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			list_move_tail(&obj_priv->fence_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret)
			goto unlock;
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		ret = -ENOMEM;
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
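/* Example: on a 915-class chip (IS_I9XX, not IS_I965G) fence regions start
 * at 1MB and double, so a 1.5MB tiled object loops 1MB -> 2MB and gets a 2MB
 * alignment, while anything up to 1MB gets 1MB. Untiled objects and all
 * 965-class chips only need the 4k GTT page alignment.
 */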
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	BUG_ON(obj_priv->pages_refcount == 0);
	BUG_ON(obj_priv->madv == __I915_MADV_PURGED);

	if (--obj_priv->pages_refcount != 0)
		return;

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj_priv->madv == I915_MADV_DONTNEED)
		obj_priv->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj_priv->pages[i] == NULL)
			break;

		if (obj_priv->dirty)
			set_page_dirty(obj_priv->pages[i]);

		if (obj_priv->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj_priv->pages[i]);

		page_cache_release(obj_priv->pages[i]);
	}
	obj_priv->dirty = 0;

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	spin_lock(&dev_priv->mm.active_list_lock);
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct inode *inode;

	inode = obj->filp->f_path.dentry->d_inode;
	if (inode->i_op->truncate)
		inode->i_op->truncate (inode);

	obj_priv->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->madv == I915_MADV_DONTNEED;
}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_mutex held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
		 uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = NULL;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	if (file_priv != NULL)
		i915_file_priv = file_priv->driver_priv;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);
	if (i915_file_priv) {
		list_add_tail(&request->client_list,
			      &i915_file_priv->mm.request_list);
	} else {
		INIT_LIST_HEAD(&request->client_list);
	}

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				uint32_t old_write_domain = obj->write_domain;

				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);

				trace_i915_gem_object_change_domain(obj,
								    obj->read_domains,
								    old_write_domain);
			}
		}
	}

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		if (was_empty)
			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	}
	return seqno;
}
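/* Note that seqno 0 is reserved as the "no request" value (the bump above
 * skips it when the counter wraps), which is why callers such as
 * i915_lp_ring_sync() below can treat a zero return as an allocation
 * failure.
 */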
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_gem_request_retire(dev, request->seqno);

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	spin_lock(&dev_priv->mm.active_list_lock);
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			goto out;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else {
			/* Take a reference on the object so it won't be
			 * freed while the spinlock is held. The list
			 * protection for this spinlock is safe when breaking
			 * the lock like this since the next thing we do
			 * is just get the head of the list again.
			 */
			drm_gem_object_reference(obj);
			i915_gem_object_move_to_inactive(obj);
			spin_unlock(&dev_priv->mm.active_list_lock);
			drm_gem_object_unreference(obj);
			spin_lock(&dev_priv->mm.active_list_lock);
		}
	}
out:
	spin_unlock(&dev_priv->mm.active_list_lock);
}
/**
 * Returns true if seq1 is later than seq2.
 */
bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
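/* The signed-difference trick makes the comparison robust against 32-bit
 * wrap-around. For example i915_seqno_passed(2, 0xfffffffe) is true, since
 * (int32_t)(2 - 0xfffffffe) == 4 >= 0, matching the fact that seqno 2 was
 * emitted after the counter wrapped; it only misjudges two live seqnos if
 * they are more than 2^31 apart.
 */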
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
		return;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    atomic_read(&dev_priv->mm.wedged)) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			list_del(&request->client_list);
			kfree(request);
		} else
			break;
	}

	if (unlikely (dev_priv->trace_irq_seqno &&
		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
		i915_user_irq_put(dev);
		dev_priv->trace_irq_seqno = 0;
	}
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
static int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (atomic_read(&dev_priv->mm.wedged))
		return -EIO;

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		if (IS_IGDNG(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		trace_i915_gem_request_wait_begin(dev, seqno);

		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		if (interruptible)
			ret = wait_event_interruptible(dev_priv->irq_queue,
				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
				atomic_read(&dev_priv->mm.wedged));
		else
			wait_event(dev_priv->irq_queue,
				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
				atomic_read(&dev_priv->mm.wedged));

		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
	}
	if (atomic_read(&dev_priv->mm.wedged))
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring. While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
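/* The interruptible flag decides how a pending signal is handled: an
 * interruptible wait may return -ERESTARTSYS so the ioctl can be restarted,
 * while interruptible == 0 (as used by i915_lp_ring_sync() below) blocks
 * until the seqno passes or the GPU is declared wedged.
 */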
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	return i915_do_wait_request(dev, seqno, 1);
}

/**
 * Waits for the ring to finish up to the latest request. Useful for waiting
 * for flip events, e.g. for the overlay support.
 */
int i915_lp_ring_sync(struct drm_device *dev)
{
	uint32_t seqno;
	int ret;

	seqno = i915_add_request(dev, NULL, 0);

	if (seqno == 0)
		return -ENOMEM;

	ret = i915_do_wait_request(dev, seqno, 0);
	BUG_ON(ret == -ERESTARTSYS);
	return ret;
}
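/* A sketch of the intended use (hypothetical caller, not part of this
 * patch): overlay code that must not touch the hardware until the ring has
 * drained would do
 *
 *	ret = i915_lp_ring_sync(dev);
 *	if (ret)
 *		return ret;	-- -ENOMEM or -EIO (GPU wedged)
 *
 * Because the wait is uninterruptible, -ERESTARTSYS cannot escape, which is
 * exactly what the BUG_ON above asserts.
 */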
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif
	trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
				     invalidate_domains, flush_domains);

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(MI_NOOP);
		ADVANCE_LP_RING();
	}
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	BUG_ON(obj_priv->active);

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	i915_gem_object_put_pages(obj);
	BUG_ON(obj_priv->pages_refcount);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	if (i915_gem_object_is_purgeable(obj_priv))
		i915_gem_object_truncate(obj);

	trace_i915_gem_object_unbind(obj);

	return 0;
}
static struct drm_gem_object *
i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *best = NULL;
	struct drm_gem_object *first = NULL;

	/* Try to find the smallest clean object */
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		struct drm_gem_object *obj = obj_priv->obj;
		if (obj->size >= min_size) {
			if ((!obj_priv->dirty ||
			     i915_gem_object_is_purgeable(obj_priv)) &&
			    (!best || obj->size < best->size)) {
				best = obj;
				if (best->size == min_size)
					return best;
			}
			if (!first)
				first = obj;
		}
	}

	return best ? best : first;
}
static int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;
	int ret;
	bool lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
		return -ENOSPC;

	/* Flush everything (on to the inactive lists) and evict */
	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
	if (seqno == 0)
		return -ENOMEM;

	ret = i915_wait_request(dev, seqno);
	if (ret)
		return ret;

	ret = i915_gem_evict_from_inactive_list(dev);
	if (ret)
		return ret;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);
	BUG_ON(!lists_empty);

	return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev, int min_size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	for (;;) {
		i915_gem_retire_requests(dev);

		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		obj = i915_gem_find_inactive_object(dev, min_size);
		if (obj) {
			struct drm_i915_gem_object *obj_priv;

#if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
			obj_priv = obj->driver_private;
			BUG_ON(obj_priv->pin_count != 0);
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			return i915_gem_object_unbind(obj);
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for the next to finish and hopefully leave us
		 * a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				return ret;

			continue;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			struct drm_i915_gem_object *obj_priv;

			/* Find an object that we can immediately reuse */
			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
				obj = obj_priv->obj;
				if (obj->size >= min_size)
					break;

				obj = NULL;
			}

			if (obj != NULL) {
				uint32_t seqno;

				i915_gem_flush(dev,
					       obj->write_domain,
					       obj->write_domain);
				seqno = i915_add_request(dev, NULL, obj->write_domain);
				if (seqno == 0)
					return -ENOMEM;

				ret = i915_wait_request(dev, seqno);
				if (ret)
					return ret;

				continue;
			}
		}

		/* If we didn't do any of the above, there's no single buffer
		 * large enough to swap out for the new one, so just evict
		 * everything and start again. (This should be rare.)
		 */
		if (!list_empty(&dev_priv->mm.inactive_list))
			return i915_gem_evict_from_inactive_list(dev);
		else
			return i915_gem_evict_everything(dev);
	}
}
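
/*
 * Summary of the loop above (illustrative): eviction preference is
 *   1) a clean or purgeable inactive buffer of sufficient size,
 *   2) waiting on the oldest outstanding request and retrying,
 *   3) flushing a large-enough buffer off the flushing list,
 *   4) emptying the inactive list, or failing that, evicting everything.
 */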
int
i915_gem_object_get_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->pages_refcount++ != 0)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
	if (obj_priv->pages == NULL) {
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			i915_gem_object_put_pages(obj);
			return ret;
		}
		obj_priv->pages[i] = page;
	}

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;
}
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}
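
/*
 * Worked example (illustrative): a 1MB X-tiled object at GTT offset
 * 0x00200000 with a 4096-byte stride encodes as
 *   end = (0x00200000 + 0x00100000 - 4096) & 0xfffff000 = 0x002ff000
 *   val = ((uint64_t)0x002ff000 << 32) | 0x00200000
 *         | ((4096 / 128 - 1) << I965_FENCE_PITCH_SHIFT)
 *         | I965_FENCE_REG_VALID;
 * i.e. the first and last fenced page in the low/high dwords plus the
 * pitch in 128-byte units, biased by one.
 */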
static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (regnum < 8)
		fence_reg = FENCE_REG_830_0 + (regnum * 4);
	else
		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
	I915_WRITE(fence_reg, val);
}
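
/*
 * Worked example (illustrative): an X-tiled surface (512-byte tile width)
 * with a 2048-byte stride gives pitch_val = ffs(2048 / 512) - 1 = 2, i.e.
 * the register stores log2 of the pitch in tile widths.
 */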
static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;
	uint32_t fence_size_bits;

	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = obj_priv->stride / 128;
	pitch_val = ffs(pitch_val) - 1;
	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
	WARN_ON(fence_size_bits & ~0x00000f00);
	val |= fence_size_bits;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *old_obj_priv = NULL;
	int i, ret, avail;

	/* Just update our place in the LRU if our fence is getting used. */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
		list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
		return 0;
	}

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	/* First try to find a free reg */
	avail = 0;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			break;

		old_obj_priv = reg->obj->driver_private;
		if (!old_obj_priv->pin_count)
			avail++;
	}

	/* None available, try to steal one or wait for a user to finish */
	if (i == dev_priv->num_fence_regs) {
		struct drm_gem_object *old_obj = NULL;

		if (avail == 0)
			return -ENOSPC;

		list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
				    fence_list) {
			old_obj = old_obj_priv->obj;

			if (old_obj_priv->pin_count)
				continue;

			/* Take a reference, as otherwise the wait_rendering
			 * below may cause the object to get freed out from
			 * under us.
			 */
			drm_gem_object_reference(old_obj);

			/* i915 uses fences for GPU access to tiled buffers */
			if (IS_I965G(dev) || !old_obj_priv->active)
				break;

			/* This brings the object to the head of the LRU if it
			 * had been written to.  The only way this should
			 * result in us waiting longer than the expected
			 * optimal amount of time is if there was a
			 * fence-using buffer later that was read-only.
			 */
			i915_gem_object_flush_gpu_write_domain(old_obj);
			ret = i915_gem_object_wait_rendering(old_obj);
			if (ret != 0) {
				drm_gem_object_unreference(old_obj);
				return ret;
			}

			break;
		}

		/*
		 * Zap this virtual mapping so we can set up a fence again
		 * for this object next time we need it.
		 */
		i915_gem_release_mmap(old_obj);

		i = old_obj_priv->fence_reg;
		reg = &dev_priv->fence_regs[i];

		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
		list_del_init(&old_obj_priv->fence_list);

		drm_gem_object_unreference(old_obj);
	}

	obj_priv->fence_reg = i;
	list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);

	reg->obj = obj;

	if (IS_I965G(dev))
		i965_write_fence_reg(reg);
	else if (IS_I9XX(dev))
		i915_write_fence_reg(reg);
	else
		i830_write_fence_reg(reg);

	trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);

	return 0;
}
/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (IS_I965G(dev))
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
	else {
		uint32_t fence_reg;

		if (obj_priv->fence_reg < 8)
			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
		else
			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
						       8) * 4;

		I915_WRITE(fence_reg, 0);
	}

	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	list_del_init(&obj_priv->fence_list);
}
/**
 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
 * to the buffer to finish, and then resets the fence register.
 * @obj: tiled object holding a fence register.
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	/* On the i915, GPU access to tiled buffers is via a fence,
	 * therefore we must wait for any outstanding access to complete
	 * before clearing the fence.
	 */
	if (!IS_I965G(dev)) {
		int ret;

		i915_gem_object_flush_gpu_write_domain(obj);
		i915_gem_object_flush_gtt_write_domain(obj);
		ret = i915_gem_object_wait_rendering(obj);
		if (ret != 0)
			return ret;
	}

	i915_gem_clear_fence_reg(obj);

	return 0;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	bool retry_alloc = false;
	int ret;

	if (dev_priv->mm.suspended)
		return -EBUSY;

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %zd at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	if (retry_alloc) {
		i915_gem_object_set_page_gfp_mask(obj,
			i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
	}
	ret = i915_gem_object_get_pages(obj);
	if (retry_alloc) {
		i915_gem_object_set_page_gfp_mask(obj,
			i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
	}
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		if (ret == -ENOMEM) {
			/* first try to clear up some space from the GTT */
			ret = i915_gem_evict_something(dev, obj->size);
			if (ret) {
				/* now try to shrink everyone else */
				if (!retry_alloc) {
					retry_alloc = true;
					goto search_free;
				}

				return ret;
			}

			goto search_free;
		}

		return ret;
	}

	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
					       obj->size >> PAGE_SHIFT,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		goto search_free;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);

	return 0;
}
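
/*
 * Summary of the fallback ladder above (illustrative): if the initial page
 * allocation fails with -ENOMEM, first evict something from the GTT and
 * retry; if eviction finds nothing, retry once more with __GFP_NORETRY
 * cleared so the allocator is allowed to reclaim more aggressively.
 */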
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->pages == NULL)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t seqno;
	uint32_t old_write_domain;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	old_write_domain = obj->write_domain;
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, NULL, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
/**
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;
	uint32_t old_read_domains;

	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, obj->pending_read_domains,
		 obj->write_domain, obj->pending_write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |=
			obj->pending_read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		i915_gem_clflush_object(obj);
	}

	old_read_domains = obj->read_domains;

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;
	obj->read_domains = obj->pending_read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);
}
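
/*
 * Worked example (illustrative) for the function above: an object at
 * (CPU, CPU) made pending (RENDER, RENDER) by execbuffer yields
 *   flush_domains      |= CPU     (old write domain differs from new reads)
 *   invalidate_domains |= RENDER  (new read domain not previously valid)
 * and, since CPU is involved, a clflush of the object's pages; this is
 * exactly step 3 of "Case 2: The shared render buffer" described above.
 */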
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	kfree(obj_priv->page_cpu_valid);
	obj_priv->page_cpu_valid = NULL;
}

/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t old_read_domains;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
						   GFP_KERNEL);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->pages + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	old_read_domains = obj->read_domains;
	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);

	return 0;
}
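
/*
 * Worked example (illustrative, assuming 4KB pages): a pread of 4 bytes at
 * offset 4096 from a 16KB object only has to clflush page 1, i.e. the loop
 * above runs from i = 4096 / PAGE_SIZE = 1 to (4096 + 4 - 1) / PAGE_SIZE = 1,
 * and marks page_cpu_valid[1] so a repeated read flushes nothing.
 */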
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry,
				 struct drm_i915_gem_relocation_entry *relocs)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc->target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc->offset,
			 (int) reloc->target_handle,
			 (int) reloc->read_domains,
			 (int) reloc->write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc->presumed_offset,
			 reloc->delta);
#endif

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc->target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Validate that the target is in a valid r/w GPU domain */
		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->read_domains,
				  reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->write_domain && target_obj->pending_write_domain &&
		    reloc->write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		target_obj->pending_read_domains |= reloc->read_domains;
		target_obj->pending_write_domain |= reloc->write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		/* Check that the relocation address is valid... */
		if (reloc->offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* and points to somewhere within the target object. */
		if (reloc->delta >= target_obj->size) {
			DRM_ERROR("Relocation beyond target object bounds: "
				  "obj %p target %d delta %d size %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->delta, (int) target_obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc->offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc->delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc->offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* The updated presumed offset for this entry will be
		 * copied back out to the user.
		 */
		reloc->presumed_offset = target_obj_priv->gtt_offset;

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     struct drm_clip_rect *cliprects,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&i915_file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);

		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ret = i915_wait_request(dev, request->seqno);
		if (ret != 0)
			break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int
i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
			      uint32_t buffer_count,
			      struct drm_i915_gem_relocation_entry **relocs)
{
	uint32_t reloc_count = 0, reloc_index = 0, i;
	int ret;

	*relocs = NULL;
	for (i = 0; i < buffer_count; i++) {
		if (reloc_count + exec_list[i].relocation_count < reloc_count)
			return -EINVAL;
		reloc_count += exec_list[i].relocation_count;
	}

	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
	if (*relocs == NULL)
		return -ENOMEM;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		ret = copy_from_user(&(*relocs)[reloc_index],
				     user_relocs,
				     exec_list[i].relocation_count *
				     sizeof(**relocs));
		if (ret != 0) {
			drm_free_large(*relocs);
			*relocs = NULL;
			return -EFAULT;
		}

		reloc_index += exec_list[i].relocation_count;
	}

	return 0;
}

static int
i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
			    uint32_t buffer_count,
			    struct drm_i915_gem_relocation_entry *relocs)
{
	uint32_t reloc_count = 0, i;
	int ret = 0;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		int unwritten;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		unwritten = copy_to_user(user_relocs,
					 &relocs[reloc_count],
					 exec_list[i].relocation_count *
					 sizeof(*relocs));

		if (unwritten) {
			ret = -EFAULT;
			goto err;
		}

		reloc_count += exec_list[i].relocation_count;
	}

err:
	drm_free_large(relocs);

	return ret;
}

static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
			  uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7)
		return -EINVAL;

	if (!exec_start)
		return -EINVAL;

	return 0;
}
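
/*
 * Worked example (illustrative): with the batch bound at exec_offset
 * 0x00a00000 and batch_start_offset = 4, exec_start = 0x00a00004 has bit 2
 * set, so (exec_start | exec_len) & 0x7 is non-zero and the execbuffer is
 * rejected with -EINVAL; both the start and length must be 8-byte aligned.
 */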
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_relocation_entry *relocs;
	int ret, ret2, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains, reloc_index;
	int pin_tries;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	if (args->num_cliprects != 0) {
		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL)
			goto pre_mutex_err;

		ret = copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)
				     (uintptr_t) args->cliprects_ptr,
				     sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0) {
			DRM_ERROR("copy %d cliprects failed: %d\n",
				  args->num_cliprects, ret);
			goto pre_mutex_err;
		}
	}

	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
					    &relocs);
	if (ret != 0)
		goto pre_mutex_err;

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}

		obj_priv = object_list[i]->driver_private;
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				  object_list[i]);
			ret = -EBADF;
			goto err;
		}
		obj_priv->in_execbuffer = true;
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		reloc_index = 0;

		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i],
							       &relocs[reloc_index]);
			if (ret)
				break;
			pinned = i + 1;
			reloc_index += exec_list[i].relocation_count;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOSPC || pin_tries >= 1) {
			if (ret != -ERESTARTSYS) {
				unsigned long long total_size = 0;
				for (i = 0; i < args->buffer_count; i++)
					total_size += object_list[i]->size;
				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
					  pinned+1, args->buffer_count,
					  total_size, ret);
				DRM_ERROR("%d objects [%d pinned], "
					  "%d object bytes [%d pinned], "
					  "%d/%d gtt bytes\n",
					  atomic_read(&dev->object_count),
					  atomic_read(&dev->pin_count),
					  atomic_read(&dev->object_memory),
					  atomic_read(&dev->pin_memory),
					  atomic_read(&dev->gtt_memory),
					  dev->gtt_total);
			}
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret && ret != -ENOSPC)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	if (batch_obj->pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* Sanity check the batch buffer, prior to moving objects */
	exec_offset = exec_list[args->buffer_count - 1].offset;
	ret = i915_gem_check_execbuffer(args, exec_offset);
	if (ret != 0) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		goto err;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, file_priv,
					       dev->flush_domains);
	}

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		uint32_t old_write_domain = obj->write_domain;

		obj->write_domain = obj->pending_write_domain;
		trace_i915_gem_object_change_domain(obj,
						    obj->read_domains,
						    old_write_domain);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

#if WATCH_EXEC
	i915_gem_dump_object(batch_obj,
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, file_priv, flush_domains);
	BUG_ON(seqno == 0);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

err:
	for (i = 0; i < pinned; i++)
		i915_gem_object_unpin(object_list[i]);

	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]) {
			obj_priv = object_list[i]->driver_private;
			obj_priv->in_execbuffer = false;
		}
		drm_gem_object_unreference(object_list[i]);
	}

	mutex_unlock(&dev->struct_mutex);

	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	/* Copy the updated relocations out regardless of current error
	 * state.  Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
	 * state that didn't match the actual object state.
	 */
	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
					   relocs);
	if (ret2 != 0) {
		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);

		if (ret == 0)
			ret = ret2;
	}

pre_mutex_err:
	drm_free_large(object_list);
	drm_free_large(exec_list);
	kfree(cliprects);

	return ret;
}
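
/*
 * Illustrative userspace sketch (not part of this file), assuming libdrm's
 * drmIoctl() wrapper; "fd", "batch_handle", "batch_len" and "ret" are
 * hypothetical. The batch object must be the last entry in the list.
 */
#if 0
	struct drm_i915_gem_exec_object exec_obj = {
		.handle = batch_handle,
		.relocation_count = 0,
		.relocs_ptr = 0,
		.alignment = 0,
	};
	struct drm_i915_gem_execbuffer execbuf = {
		.buffers_ptr = (uintptr_t)&exec_obj,
		.buffer_count = 1,
		.batch_start_offset = 0,
		.batch_len = batch_len,	/* must be a multiple of 8 bytes */
		.num_cliprects = 0,
	};

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
#endif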
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret)
			return ret;
	}
	/*
	 * Pre-965 chips need a fence register set up in order to
	 * properly handle tiled surfaces.
	 */
	if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to install fence: %d\n",
					  ret);
			return ret;
		}
	}
	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}

void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
3954 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3955 struct drm_file *file_priv)
3957 struct drm_i915_gem_pin *args = data;
3958 struct drm_gem_object *obj;
3959 struct drm_i915_gem_object *obj_priv;
3960 int ret;
3962 mutex_lock(&dev->struct_mutex);
3964 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3965 if (obj == NULL) {
3966 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3967 args->handle);
3968 mutex_unlock(&dev->struct_mutex);
3969 return -EBADF;
3971 obj_priv = obj->driver_private;
3973 if (obj_priv->madv != I915_MADV_WILLNEED) {
3974 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3975 drm_gem_object_unreference(obj);
3976 mutex_unlock(&dev->struct_mutex);
3977 return -EINVAL;
3980 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3981 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3982 args->handle);
3983 drm_gem_object_unreference(obj);
3984 mutex_unlock(&dev->struct_mutex);
3985 return -EINVAL;
3988 obj_priv->user_pin_count++;
3989 obj_priv->pin_filp = file_priv;
3990 if (obj_priv->user_pin_count == 1) {
3991 ret = i915_gem_object_pin(obj, args->alignment);
3992 if (ret != 0) {
3993 drm_gem_object_unreference(obj);
3994 mutex_unlock(&dev->struct_mutex);
3995 return ret;
3996 }
3997 }
3999 /* XXX - flush the CPU caches for pinned objects
4000 * as the X server doesn't manage domains yet
4001 */
4002 i915_gem_object_flush_cpu_write_domain(obj);
4003 args->offset = obj_priv->gtt_offset;
4004 drm_gem_object_unreference(obj);
4005 mutex_unlock(&dev->struct_mutex);
4007 return 0;
4008 }
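/* Usage sketch (illustrative, from userspace): pinning reports the
 * buffer's fixed GTT offset back through the same struct, assuming an
 * open DRM fd and a valid GEM handle:
 *
 *	struct drm_i915_gem_pin pin = { .handle = handle, .alignment = 4096 };
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin) == 0)
 *		use_gtt_offset(pin.offset);	(use_gtt_offset is hypothetical)
 */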
4010 int
4011 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4012 struct drm_file *file_priv)
4013 {
4014 struct drm_i915_gem_pin *args = data;
4015 struct drm_gem_object *obj;
4016 struct drm_i915_gem_object *obj_priv;
4018 mutex_lock(&dev->struct_mutex);
4020 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4021 if (obj == NULL) {
4022 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
4023 args->handle);
4024 mutex_unlock(&dev->struct_mutex);
4025 return -EBADF;
4026 }
4028 obj_priv = obj->driver_private;
4029 if (obj_priv->pin_filp != file_priv) {
4030 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
4031 args->handle);
4032 drm_gem_object_unreference(obj);
4033 mutex_unlock(&dev->struct_mutex);
4034 return -EINVAL;
4035 }
4036 obj_priv->user_pin_count--;
4037 if (obj_priv->user_pin_count == 0) {
4038 obj_priv->pin_filp = NULL;
4039 i915_gem_object_unpin(obj);
4040 }
4042 drm_gem_object_unreference(obj);
4043 mutex_unlock(&dev->struct_mutex);
4044 return 0;
4045 }
4047 int
4048 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4049 struct drm_file *file_priv)
4050 {
4051 struct drm_i915_gem_busy *args = data;
4052 struct drm_gem_object *obj;
4053 struct drm_i915_gem_object *obj_priv;
4055 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4056 if (obj == NULL) {
4057 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4058 args->handle);
4059 return -EBADF;
4060 }
4062 mutex_lock(&dev->struct_mutex);
4063 /* Update the active list for the hardware's current position.
4064 * Otherwise this only updates on a delayed timer or when irqs are
4065 * actually unmasked, and our working set ends up being larger than
4066 * required.
4067 */
4068 i915_gem_retire_requests(dev);
4070 obj_priv = obj->driver_private;
4071 /* Don't count being on the flushing list against the object being
4072 * done. Otherwise, a buffer left on the flushing list but not getting
4073 * flushed (because nobody's flushing that domain) won't ever return
4074 * unbusy and get reused by libdrm's bo cache. The other expected
4075 * consumer of this interface, OpenGL's occlusion queries, also specs
4076 * that the objects get unbusy "eventually" without any interference.
4077 */
4078 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
4080 drm_gem_object_unreference(obj);
4081 mutex_unlock(&dev->struct_mutex);
4082 return 0;
4083 }
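/* Usage sketch (illustrative): the expected consumers mentioned above
 * simply poll this ioctl until the buffer reports idle:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	do {
 *		ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	} while (busy.busy);
 */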
4085 int
4086 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4087 struct drm_file *file_priv)
4088 {
4089 return i915_gem_ring_throttle(dev, file_priv);
4090 }
4092 int
4093 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4094 struct drm_file *file_priv)
4095 {
4096 struct drm_i915_gem_madvise *args = data;
4097 struct drm_gem_object *obj;
4098 struct drm_i915_gem_object *obj_priv;
4100 switch (args->madv) {
4101 case I915_MADV_DONTNEED:
4102 case I915_MADV_WILLNEED:
4103 break;
4104 default:
4105 return -EINVAL;
4106 }
4108 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4109 if (obj == NULL) {
4110 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4111 args->handle);
4112 return -EBADF;
4113 }
4115 mutex_lock(&dev->struct_mutex);
4116 obj_priv = obj->driver_private;
4118 if (obj_priv->pin_count) {
4119 drm_gem_object_unreference(obj);
4120 mutex_unlock(&dev->struct_mutex);
4122 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4123 return -EINVAL;
4124 }
4126 if (obj_priv->madv != __I915_MADV_PURGED)
4127 obj_priv->madv = args->madv;
4129 /* if the object is no longer bound, discard its backing storage */
4130 if (i915_gem_object_is_purgeable(obj_priv) &&
4131 obj_priv->gtt_space == NULL)
4132 i915_gem_object_truncate(obj);
4134 args->retained = obj_priv->madv != __I915_MADV_PURGED;
4136 drm_gem_object_unreference(obj);
4137 mutex_unlock(&dev->struct_mutex);
4139 return 0;
4140 }
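/* Usage sketch (illustrative): a caching buffer allocator marks idle
 * buffers purgeable and must check "retained" before trusting old
 * contents:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle, .madv = I915_MADV_DONTNEED,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reupload_contents();	(reupload_contents is hypothetical)
 */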
4142 int i915_gem_init_object(struct drm_gem_object *obj)
4143 {
4144 struct drm_i915_gem_object *obj_priv;
4146 obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
4147 if (obj_priv == NULL)
4148 return -ENOMEM;
4150 /*
4151 * We've just allocated pages from the kernel,
4152 * so they've just been written by the CPU with
4153 * zeros. They'll need to be clflushed before we
4154 * use them with the GPU.
4155 */
4156 obj->write_domain = I915_GEM_DOMAIN_CPU;
4157 obj->read_domains = I915_GEM_DOMAIN_CPU;
4159 obj_priv->agp_type = AGP_USER_MEMORY;
4161 obj->driver_private = obj_priv;
4162 obj_priv->obj = obj;
4163 obj_priv->fence_reg = I915_FENCE_REG_NONE;
4164 INIT_LIST_HEAD(&obj_priv->list);
4165 INIT_LIST_HEAD(&obj_priv->fence_list);
4166 obj_priv->madv = I915_MADV_WILLNEED;
4168 trace_i915_gem_object_create(obj);
4170 return 0;
4171 }
4173 void i915_gem_free_object(struct drm_gem_object *obj)
4174 {
4175 struct drm_device *dev = obj->dev;
4176 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4178 trace_i915_gem_object_destroy(obj);
4180 while (obj_priv->pin_count > 0)
4181 i915_gem_object_unpin(obj);
4183 if (obj_priv->phys_obj)
4184 i915_gem_detach_phys_object(dev, obj);
4186 i915_gem_object_unbind(obj);
4188 if (obj_priv->mmap_offset)
4189 i915_gem_free_mmap_offset(obj);
4191 kfree(obj_priv->page_cpu_valid);
4192 kfree(obj_priv->bit_17);
4193 kfree(obj->driver_private);
4194 }
4196 /** Unbinds all inactive objects. */
4197 static int
4198 i915_gem_evict_from_inactive_list(struct drm_device *dev)
4199 {
4200 drm_i915_private_t *dev_priv = dev->dev_private;
4202 while (!list_empty(&dev_priv->mm.inactive_list)) {
4203 struct drm_gem_object *obj;
4204 int ret;
4206 obj = list_first_entry(&dev_priv->mm.inactive_list,
4207 struct drm_i915_gem_object,
4208 list)->obj;
4210 ret = i915_gem_object_unbind(obj);
4211 if (ret != 0) {
4212 DRM_ERROR("Error unbinding object: %d\n", ret);
4213 return ret;
4214 }
4215 }
4217 return 0;
4218 }
4220 int
4221 i915_gem_idle(struct drm_device *dev)
4222 {
4223 drm_i915_private_t *dev_priv = dev->dev_private;
4224 uint32_t seqno, cur_seqno, last_seqno;
4225 int stuck, ret;
4227 mutex_lock(&dev->struct_mutex);
4229 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
4230 mutex_unlock(&dev->struct_mutex);
4231 return 0;
4232 }
4234 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4235 * We need to replace this with a semaphore, or something.
4236 */
4237 dev_priv->mm.suspended = 1;
4238 del_timer(&dev_priv->hangcheck_timer);
4240 /* Cancel the retire work handler, wait for it to finish if running
4241 */
4242 mutex_unlock(&dev->struct_mutex);
4243 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4244 mutex_lock(&dev->struct_mutex);
4246 i915_kernel_lost_context(dev);
4248 /* Flush the GPU along with all non-CPU write domains
4249 */
4250 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
4251 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
4253 if (seqno == 0) {
4254 mutex_unlock(&dev->struct_mutex);
4255 return -ENOMEM;
4256 }
4258 dev_priv->mm.waiting_gem_seqno = seqno;
4259 last_seqno = 0;
4260 stuck = 0;
4261 for (;;) {
4262 cur_seqno = i915_get_gem_seqno(dev);
4263 if (i915_seqno_passed(cur_seqno, seqno))
4264 break;
4265 if (last_seqno == cur_seqno) {
4266 if (stuck++ > 100) {
4267 DRM_ERROR("hardware wedged\n");
4268 atomic_set(&dev_priv->mm.wedged, 1);
4269 DRM_WAKEUP(&dev_priv->irq_queue);
4270 break;
4271 }
4272 }
4273 msleep(10);
4274 last_seqno = cur_seqno;
4275 }
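/* Note: with stuck capped at 100 iterations of msleep(10), the loop
 * above declares the hardware wedged after roughly one second without
 * forward progress on the seqno.
 */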
4276 dev_priv->mm.waiting_gem_seqno = 0;
4278 i915_gem_retire_requests(dev);
4280 spin_lock(&dev_priv->mm.active_list_lock);
4281 if (!atomic_read(&dev_priv->mm.wedged)) {
4282 /* Active and flushing should now be empty as we've
4283 * waited for a sequence higher than any pending execbuffer
4284 */
4285 WARN_ON(!list_empty(&dev_priv->mm.active_list));
4286 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
4287 /* Request should now be empty as we've also waited
4288 * for the last request in the list
4289 */
4290 WARN_ON(!list_empty(&dev_priv->mm.request_list));
4291 }
4293 /* Empty the active and flushing lists to inactive. If there's
4294 * anything left at this point, it means that we're wedged and
4295 * nothing good's going to happen by leaving them there. So strip
4296 * the GPU domains and just stuff them onto inactive.
4297 */
4298 while (!list_empty(&dev_priv->mm.active_list)) {
4299 struct drm_gem_object *obj;
4300 uint32_t old_write_domain;
4302 obj = list_first_entry(&dev_priv->mm.active_list,
4303 struct drm_i915_gem_object,
4304 list)->obj;
4305 old_write_domain = obj->write_domain;
4306 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4307 i915_gem_object_move_to_inactive(obj);
4309 trace_i915_gem_object_change_domain(obj,
4310 obj->read_domains,
4311 old_write_domain);
4312 }
4313 spin_unlock(&dev_priv->mm.active_list_lock);
4315 while (!list_empty(&dev_priv->mm.flushing_list)) {
4316 struct drm_gem_object *obj;
4317 uint32_t old_write_domain;
4319 obj = list_first_entry(&dev_priv->mm.flushing_list,
4320 struct drm_i915_gem_object,
4321 list)->obj;
4322 old_write_domain = obj->write_domain;
4323 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4324 i915_gem_object_move_to_inactive(obj);
4326 trace_i915_gem_object_change_domain(obj,
4327 obj->read_domains,
4328 old_write_domain);
4329 }
4332 /* Move all inactive buffers out of the GTT. */
4333 ret = i915_gem_evict_from_inactive_list(dev);
4334 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
4335 if (ret) {
4336 mutex_unlock(&dev->struct_mutex);
4337 return ret;
4338 }
4340 i915_gem_cleanup_ringbuffer(dev);
4341 mutex_unlock(&dev->struct_mutex);
4343 return 0;
4344 }
4346 static int
4347 i915_gem_init_hws(struct drm_device *dev)
4348 {
4349 drm_i915_private_t *dev_priv = dev->dev_private;
4350 struct drm_gem_object *obj;
4351 struct drm_i915_gem_object *obj_priv;
4352 int ret;
4354 /* If we need a physical address for the status page, it's already
4355 * initialized at driver load time.
4356 */
4357 if (!I915_NEED_GFX_HWS(dev))
4358 return 0;
4360 obj = drm_gem_object_alloc(dev, 4096);
4361 if (obj == NULL) {
4362 DRM_ERROR("Failed to allocate status page\n");
4363 return -ENOMEM;
4364 }
4365 obj_priv = obj->driver_private;
4366 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4368 ret = i915_gem_object_pin(obj, 4096);
4369 if (ret != 0) {
4370 drm_gem_object_unreference(obj);
4371 return ret;
4372 }
4374 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
4376 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
4377 if (dev_priv->hw_status_page == NULL) {
4378 DRM_ERROR("Failed to map status page.\n");
4379 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4380 i915_gem_object_unpin(obj);
4381 drm_gem_object_unreference(obj);
4382 return -EINVAL;
4383 }
4384 dev_priv->hws_obj = obj;
4385 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4386 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4387 I915_READ(HWS_PGA); /* posting read */
4388 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4390 return 0;
4391 }
4393 static void
4394 i915_gem_cleanup_hws(struct drm_device *dev)
4395 {
4396 drm_i915_private_t *dev_priv = dev->dev_private;
4397 struct drm_gem_object *obj;
4398 struct drm_i915_gem_object *obj_priv;
4400 if (dev_priv->hws_obj == NULL)
4401 return;
4403 obj = dev_priv->hws_obj;
4404 obj_priv = obj->driver_private;
4406 kunmap(obj_priv->pages[0]);
4407 i915_gem_object_unpin(obj);
4408 drm_gem_object_unreference(obj);
4409 dev_priv->hws_obj = NULL;
4411 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4412 dev_priv->hw_status_page = NULL;
4414 /* Write high address into HWS_PGA when disabling. */
4415 I915_WRITE(HWS_PGA, 0x1ffff000);
4416 }
4418 int
4419 i915_gem_init_ringbuffer(struct drm_device *dev)
4420 {
4421 drm_i915_private_t *dev_priv = dev->dev_private;
4422 struct drm_gem_object *obj;
4423 struct drm_i915_gem_object *obj_priv;
4424 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
4425 int ret;
4426 u32 head;
4428 ret = i915_gem_init_hws(dev);
4429 if (ret != 0)
4430 return ret;
4432 obj = drm_gem_object_alloc(dev, 128 * 1024);
4433 if (obj == NULL) {
4434 DRM_ERROR("Failed to allocate ringbuffer\n");
4435 i915_gem_cleanup_hws(dev);
4436 return -ENOMEM;
4437 }
4438 obj_priv = obj->driver_private;
4440 ret = i915_gem_object_pin(obj, 4096);
4441 if (ret != 0) {
4442 drm_gem_object_unreference(obj);
4443 i915_gem_cleanup_hws(dev);
4444 return ret;
4445 }
4447 /* Set up the kernel mapping for the ring. */
4448 ring->Size = obj->size;
4450 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
4451 ring->map.size = obj->size;
4452 ring->map.type = 0;
4453 ring->map.flags = 0;
4454 ring->map.mtrr = 0;
4456 drm_core_ioremap_wc(&ring->map, dev);
4457 if (ring->map.handle == NULL) {
4458 DRM_ERROR("Failed to map ringbuffer.\n");
4459 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4460 i915_gem_object_unpin(obj);
4461 drm_gem_object_unreference(obj);
4462 i915_gem_cleanup_hws(dev);
4463 return -EINVAL;
4464 }
4465 ring->ring_obj = obj;
4466 ring->virtual_start = ring->map.handle;
4468 /* Stop the ring if it's running. */
4469 I915_WRITE(PRB0_CTL, 0);
4470 I915_WRITE(PRB0_TAIL, 0);
4471 I915_WRITE(PRB0_HEAD, 0);
4473 /* Initialize the ring. */
4474 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
4475 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4477 /* G45 ring initialization fails to reset head to zero */
4478 if (head != 0) {
4479 DRM_ERROR("Ring head not reset to zero "
4480 "ctl %08x head %08x tail %08x start %08x\n",
4481 I915_READ(PRB0_CTL),
4482 I915_READ(PRB0_HEAD),
4483 I915_READ(PRB0_TAIL),
4484 I915_READ(PRB0_START));
4485 I915_WRITE(PRB0_HEAD, 0);
4487 DRM_ERROR("Ring head forced to zero "
4488 "ctl %08x head %08x tail %08x start %08x\n",
4489 I915_READ(PRB0_CTL),
4490 I915_READ(PRB0_HEAD),
4491 I915_READ(PRB0_TAIL),
4492 I915_READ(PRB0_START));
4493 }
4495 I915_WRITE(PRB0_CTL,
4496 ((obj->size - 4096) & RING_NR_PAGES) |
4497 RING_NO_REPORT |
4498 RING_VALID);
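/* Note: PRB0_CTL carries the ring length (in pages, minus one) in the
 * RING_NR_PAGES field and enables the ring via RING_VALID; the code
 * above programs START before making the ring valid.
 */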
4500 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4502 /* If the head is still not zero, the ring is dead */
4503 if (head != 0) {
4504 DRM_ERROR("Ring initialization failed "
4505 "ctl %08x head %08x tail %08x start %08x\n",
4506 I915_READ(PRB0_CTL),
4507 I915_READ(PRB0_HEAD),
4508 I915_READ(PRB0_TAIL),
4509 I915_READ(PRB0_START));
4510 return -EIO;
4511 }
4513 /* Update our cache of the ring state */
4514 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4515 i915_kernel_lost_context(dev);
4516 else {
4517 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4518 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4519 ring->space = ring->head - (ring->tail + 8);
4520 if (ring->space < 0)
4521 ring->space += ring->Size;
4522 }
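/* Worked example for the space calculation above: with Size = 128 KiB
 * (0x20000), head = 0x100 and tail = 0x1f00, space = 0x100 - (0x1f00 +
 * 8) = -0x1e08, which wraps to 0x1e1f8 bytes free; the extra 8 bytes
 * keep the tail from catching up to the head.
 */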
4524 return 0;
4525 }
4527 void
4528 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4529 {
4530 drm_i915_private_t *dev_priv = dev->dev_private;
4532 if (dev_priv->ring.ring_obj == NULL)
4533 return;
4535 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4537 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4538 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4539 dev_priv->ring.ring_obj = NULL;
4540 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4542 i915_gem_cleanup_hws(dev);
4543 }
4545 int
4546 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4547 struct drm_file *file_priv)
4548 {
4549 drm_i915_private_t *dev_priv = dev->dev_private;
4550 int ret;
4552 if (drm_core_check_feature(dev, DRIVER_MODESET))
4553 return 0;
4555 if (atomic_read(&dev_priv->mm.wedged)) {
4556 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4557 atomic_set(&dev_priv->mm.wedged, 0);
4558 }
4560 mutex_lock(&dev->struct_mutex);
4561 dev_priv->mm.suspended = 0;
4563 ret = i915_gem_init_ringbuffer(dev);
4564 if (ret != 0) {
4565 mutex_unlock(&dev->struct_mutex);
4566 return ret;
4567 }
4569 spin_lock(&dev_priv->mm.active_list_lock);
4570 BUG_ON(!list_empty(&dev_priv->mm.active_list));
4571 spin_unlock(&dev_priv->mm.active_list_lock);
4573 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4574 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4575 BUG_ON(!list_empty(&dev_priv->mm.request_list));
4576 mutex_unlock(&dev->struct_mutex);
4578 drm_irq_install(dev);
4580 return 0;
4581 }
4583 int
4584 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4585 struct drm_file *file_priv)
4586 {
4587 if (drm_core_check_feature(dev, DRIVER_MODESET))
4588 return 0;
4590 drm_irq_uninstall(dev);
4591 return i915_gem_idle(dev);
4592 }
4594 void
4595 i915_gem_lastclose(struct drm_device *dev)
4596 {
4597 int ret;
4599 if (drm_core_check_feature(dev, DRIVER_MODESET))
4600 return;
4602 ret = i915_gem_idle(dev);
4603 if (ret)
4604 DRM_ERROR("failed to idle hardware: %d\n", ret);
4605 }
4607 void
4608 i915_gem_load(struct drm_device *dev)
4609 {
4610 int i;
4611 drm_i915_private_t *dev_priv = dev->dev_private;
4613 spin_lock_init(&dev_priv->mm.active_list_lock);
4614 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4615 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4616 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4617 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4618 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4619 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4620 i915_gem_retire_work_handler);
4621 dev_priv->mm.next_gem_seqno = 1;
4623 spin_lock(&shrink_list_lock);
4624 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4625 spin_unlock(&shrink_list_lock);
4627 /* Old X drivers will take 0-2 for front, back, depth buffers */
4628 dev_priv->fence_reg_start = 3;
4630 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4631 dev_priv->num_fence_regs = 16;
4632 else
4633 dev_priv->num_fence_regs = 8;
4635 /* Initialize fence registers to zero */
4636 if (IS_I965G(dev)) {
4637 for (i = 0; i < 16; i++)
4638 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4639 } else {
4640 for (i = 0; i < 8; i++)
4641 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4642 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4643 for (i = 0; i < 8; i++)
4644 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4645 }
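/* Note: 965-class fence registers are 64 bits wide (hence the
 * I915_WRITE64 and the stride of 8 above), while pre-965 fences are
 * single 32-bit registers.
 */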
4647 i915_gem_detect_bit_6_swizzle(dev);
4648 }
4650 /*
4651 * Create a physically contiguous memory object for this object
4652 * e.g. for cursor + overlay regs
4653 */
4654 int i915_gem_init_phys_object(struct drm_device *dev,
4655 int id, int size)
4656 {
4657 drm_i915_private_t *dev_priv = dev->dev_private;
4658 struct drm_i915_gem_phys_object *phys_obj;
4659 int ret;
4661 if (dev_priv->mm.phys_objs[id - 1] || !size)
4662 return 0;
4664 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4665 if (!phys_obj)
4666 return -ENOMEM;
4668 phys_obj->id = id;
4670 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4671 if (!phys_obj->handle) {
4672 ret = -ENOMEM;
4673 goto kfree_obj;
4674 }
4675 #ifdef CONFIG_X86
4676 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4677 #endif
4679 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4681 return 0;
4682 kfree_obj:
4683 kfree(phys_obj);
4684 return ret;
4685 }
4687 void i915_gem_free_phys_object(struct drm_device *dev, int id)
4688 {
4689 drm_i915_private_t *dev_priv = dev->dev_private;
4690 struct drm_i915_gem_phys_object *phys_obj;
4692 if (!dev_priv->mm.phys_objs[id - 1])
4693 return;
4695 phys_obj = dev_priv->mm.phys_objs[id - 1];
4696 if (phys_obj->cur_obj) {
4697 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4698 }
4700 #ifdef CONFIG_X86
4701 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4702 #endif
4703 drm_pci_free(dev, phys_obj->handle);
4704 kfree(phys_obj);
4705 dev_priv->mm.phys_objs[id - 1] = NULL;
4706 }
4708 void i915_gem_free_all_phys_object(struct drm_device *dev)
4709 {
4710 int i;
4712 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4713 i915_gem_free_phys_object(dev, i);
4714 }
4716 void i915_gem_detach_phys_object(struct drm_device *dev,
4717 struct drm_gem_object *obj)
4718 {
4719 struct drm_i915_gem_object *obj_priv;
4720 int i;
4721 int ret;
4722 int page_count;
4724 obj_priv = obj->driver_private;
4725 if (!obj_priv->phys_obj)
4726 return;
4728 ret = i915_gem_object_get_pages(obj);
4729 if (ret)
4730 goto out;
4732 page_count = obj->size / PAGE_SIZE;
4734 for (i = 0; i < page_count; i++) {
4735 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
4736 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4738 memcpy(dst, src, PAGE_SIZE);
4739 kunmap_atomic(dst, KM_USER0);
4740 }
4741 drm_clflush_pages(obj_priv->pages, page_count);
4742 drm_agp_chipset_flush(dev);
4744 i915_gem_object_put_pages(obj);
4745 out:
4746 obj_priv->phys_obj->cur_obj = NULL;
4747 obj_priv->phys_obj = NULL;
4748 }
4750 int
4751 i915_gem_attach_phys_object(struct drm_device *dev,
4752 struct drm_gem_object *obj, int id)
4753 {
4754 drm_i915_private_t *dev_priv = dev->dev_private;
4755 struct drm_i915_gem_object *obj_priv;
4756 int ret = 0;
4757 int page_count;
4758 int i;
4760 if (id > I915_MAX_PHYS_OBJECT)
4761 return -EINVAL;
4763 obj_priv = obj->driver_private;
4765 if (obj_priv->phys_obj) {
4766 if (obj_priv->phys_obj->id == id)
4767 return 0;
4768 i915_gem_detach_phys_object(dev, obj);
4769 }
4772 /* create a new object */
4773 if (!dev_priv->mm.phys_objs[id - 1]) {
4774 ret = i915_gem_init_phys_object(dev, id,
4775 obj->size);
4776 if (ret) {
4777 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4778 goto out;
4779 }
4780 }
4782 /* bind to the object */
4783 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4784 obj_priv->phys_obj->cur_obj = obj;
4786 ret = i915_gem_object_get_pages(obj);
4787 if (ret) {
4788 DRM_ERROR("failed to get page list\n");
4789 goto out;
4790 }
4792 page_count = obj->size / PAGE_SIZE;
4794 for (i = 0; i < page_count; i++) {
4795 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
4796 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4798 memcpy(dst, src, PAGE_SIZE);
4799 kunmap_atomic(src, KM_USER0);
4800 }
4802 i915_gem_object_put_pages(obj);
4804 return 0;
4805 out:
4806 return ret;
4807 }
4809 static int
4810 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4811 struct drm_i915_gem_pwrite *args,
4812 struct drm_file *file_priv)
4813 {
4814 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4815 void *obj_addr;
4816 int ret;
4817 char __user *user_data;
4819 user_data = (char __user *) (uintptr_t) args->data_ptr;
4820 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4822 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
4823 ret = copy_from_user(obj_addr, user_data, args->size);
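/* copy_from_user() returns the number of bytes it could not copy, so
 * any nonzero result below is reported to the caller as -EFAULT. */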
4824 if (ret)
4825 return -EFAULT;
4827 drm_agp_chipset_flush(dev);
4828 return 0;
4829 }
4831 void i915_gem_release(struct drm_device *dev, struct drm_file *file_priv)
4832 {
4833 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4835 /* Clean up our request list when the client is going away, so that
4836 * later retire_requests won't dereference our soon-to-be-gone
4837 * file_priv.
4838 */
4839 mutex_lock(&dev->struct_mutex);
4840 while (!list_empty(&i915_file_priv->mm.request_list))
4841 list_del_init(i915_file_priv->mm.request_list.next);
4842 mutex_unlock(&dev->struct_mutex);
4843 }
4845 static int
4846 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
4847 {
4848 drm_i915_private_t *dev_priv, *next_dev;
4849 struct drm_i915_gem_object *obj_priv, *next_obj;
4850 int cnt = 0;
4851 int would_deadlock = 1;
4853 /* "fast-path" to count number of available objects */
4854 if (nr_to_scan == 0) {
4855 spin_lock(&shrink_list_lock);
4856 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4857 struct drm_device *dev = dev_priv->dev;
4859 if (mutex_trylock(&dev->struct_mutex)) {
4860 list_for_each_entry(obj_priv,
4861 &dev_priv->mm.inactive_list,
4862 list)
4863 cnt++;
4864 mutex_unlock(&dev->struct_mutex);
4865 }
4866 }
4867 spin_unlock(&shrink_list_lock);
4869 return (cnt / 100) * sysctl_vfs_cache_pressure;
4870 }
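/* Note: with the default sysctl_vfs_cache_pressure of 100, the
 * (cnt / 100) * sysctl_vfs_cache_pressure scaling above reports
 * roughly one freeable unit per inactive object counted.
 */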
4872 spin_lock(&shrink_list_lock);
4874 /* first scan for clean buffers */
4875 list_for_each_entry_safe(dev_priv, next_dev,
4876 &shrink_list, mm.shrink_list) {
4877 struct drm_device *dev = dev_priv->dev;
4879 if (!mutex_trylock(&dev->struct_mutex))
4880 continue;
4882 spin_unlock(&shrink_list_lock);
4884 i915_gem_retire_requests(dev);
4886 list_for_each_entry_safe(obj_priv, next_obj,
4887 &dev_priv->mm.inactive_list,
4888 list) {
4889 if (i915_gem_object_is_purgeable(obj_priv)) {
4890 i915_gem_object_unbind(obj_priv->obj);
4891 if (--nr_to_scan <= 0)
4892 break;
4893 }
4894 }
4896 spin_lock(&shrink_list_lock);
4897 mutex_unlock(&dev->struct_mutex);
4899 would_deadlock = 0;
4901 if (nr_to_scan <= 0)
4902 break;
4903 }
4905 /* second pass, evict/count anything still on the inactive list */
4906 list_for_each_entry_safe(dev_priv, next_dev,
4907 &shrink_list, mm.shrink_list) {
4908 struct drm_device *dev = dev_priv->dev;
4910 if (!mutex_trylock(&dev->struct_mutex))
4911 continue;
4913 spin_unlock(&shrink_list_lock);
4915 list_for_each_entry_safe(obj_priv, next_obj,
4916 &dev_priv->mm.inactive_list,
4917 list) {
4918 if (nr_to_scan > 0) {
4919 i915_gem_object_unbind(obj_priv->obj);
4920 nr_to_scan--;
4921 } else
4922 cnt++;
4923 }
4925 spin_lock(&shrink_list_lock);
4926 mutex_unlock(&dev->struct_mutex);
4928 would_deadlock = 0;
4929 }
4931 spin_unlock(&shrink_list_lock);
4933 if (would_deadlock)
4934 return -1;
4935 else if (cnt > 0)
4936 return (cnt / 100) * sysctl_vfs_cache_pressure;
4937 else
4938 return 0;
4939 }
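/* Note: returning -1 is how a shrinker of this era tells the VM it
 * could not make progress without risking deadlock (the trylocks above
 * failed); the VM simply retries later.
 */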
4941 static struct shrinker shrinker = {
4942 .shrink = i915_gem_shrink,
4943 .seeks = DEFAULT_SEEKS,
4944 };
4946 __init void
4947 i915_gem_shrinker_init(void)
4948 {
4949 register_shrinker(&shrinker);
4950 }
4952 __exit void
4953 i915_gem_shrinker_exit(void)
4954 {
4955 unregister_shrinker(&shrinker);
4956 }