drm/i915: Add buffer to inactive list immediately during fault
drivers/gpu/drm/i915/i915_gem.c
1 /*
2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include <linux/swap.h>
33 #include <linux/pci.h>
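/* Mask of the GPU-side caching domains: every domain except the two CPU-visible ones (CPU and GTT). */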
35 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
37 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
38 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
39 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
40 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
41 int write);
42 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43 uint64_t offset,
44 uint64_t size);
45 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
47 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
48 unsigned alignment);
49 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
50 static int i915_gem_evict_something(struct drm_device *dev);
51 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
52 struct drm_i915_gem_pwrite *args,
53 struct drm_file *file_priv);
55 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
56 unsigned long end)
58 drm_i915_private_t *dev_priv = dev->dev_private;
60 if (start >= end ||
61 (start & (PAGE_SIZE - 1)) != 0 ||
62 (end & (PAGE_SIZE - 1)) != 0) {
63 return -EINVAL;
66 drm_mm_init(&dev_priv->mm.gtt_space, start,
67 end - start);
69 dev->gtt_total = (uint32_t) (end - start);
71 return 0;
74 int
75 i915_gem_init_ioctl(struct drm_device *dev, void *data,
76 struct drm_file *file_priv)
78 struct drm_i915_gem_init *args = data;
79 int ret;
81 mutex_lock(&dev->struct_mutex);
82 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
83 mutex_unlock(&dev->struct_mutex);
85 return ret;
88 int
89 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
90 struct drm_file *file_priv)
92 struct drm_i915_gem_get_aperture *args = data;
94 if (!(dev->driver->driver_features & DRIVER_GEM))
95 return -ENODEV;
97 args->aper_size = dev->gtt_total;
98 args->aper_available_size = (args->aper_size -
99 atomic_read(&dev->pin_memory));
101 return 0;
106 * Creates a new mm object and returns a handle to it.
109 i915_gem_create_ioctl(struct drm_device *dev, void *data,
110 struct drm_file *file_priv)
112 struct drm_i915_gem_create *args = data;
113 struct drm_gem_object *obj;
114 int handle, ret;
116 args->size = roundup(args->size, PAGE_SIZE);
118 /* Allocate the new object */
119 obj = drm_gem_object_alloc(dev, args->size);
120 if (obj == NULL)
121 return -ENOMEM;
123 ret = drm_gem_handle_create(file_priv, obj, &handle);
124 mutex_lock(&dev->struct_mutex);
125 drm_gem_object_handle_unreference(obj);
126 mutex_unlock(&dev->struct_mutex);
128 if (ret)
129 return ret;
131 args->handle = handle;
133 return 0;
136 static inline int
137 fast_shmem_read(struct page **pages,
138 loff_t page_base, int page_offset,
139 char __user *data,
140 int length)
142 char __iomem *vaddr;
143 int unwritten;
145 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
146 if (vaddr == NULL)
147 return -ENOMEM;
148 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
149 kunmap_atomic(vaddr, KM_USER0);
151 if (unwritten)
152 return -EFAULT;
154 return 0;
157 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
159 drm_i915_private_t *dev_priv = obj->dev->dev_private;
160 struct drm_i915_gem_object *obj_priv = obj->driver_private;
162 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
163 obj_priv->tiling_mode != I915_TILING_NONE;
166 static inline int
167 slow_shmem_copy(struct page *dst_page,
168 int dst_offset,
169 struct page *src_page,
170 int src_offset,
171 int length)
173 char *dst_vaddr, *src_vaddr;
175 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
176 if (dst_vaddr == NULL)
177 return -ENOMEM;
179 src_vaddr = kmap_atomic(src_page, KM_USER1);
180 if (src_vaddr == NULL) {
181 kunmap_atomic(dst_vaddr, KM_USER0);
182 return -ENOMEM;
185 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
187 kunmap_atomic(src_vaddr, KM_USER1);
188 kunmap_atomic(dst_vaddr, KM_USER0);
190 return 0;
193 static inline int
194 slow_shmem_bit17_copy(struct page *gpu_page,
195 int gpu_offset,
196 struct page *cpu_page,
197 int cpu_offset,
198 int length,
199 int is_read)
201 char *gpu_vaddr, *cpu_vaddr;
203 /* Use the unswizzled path if this page isn't affected. */
204 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
205 if (is_read)
206 return slow_shmem_copy(cpu_page, cpu_offset,
207 gpu_page, gpu_offset, length);
208 else
209 return slow_shmem_copy(gpu_page, gpu_offset,
210 cpu_page, cpu_offset, length);
213 gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
214 if (gpu_vaddr == NULL)
215 return -ENOMEM;
217 cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
218 if (cpu_vaddr == NULL) {
219 kunmap_atomic(gpu_vaddr, KM_USER0);
220 return -ENOMEM;
223 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
224 * XORing with the other bits (A9 for Y, A9 and A10 for X)
226 while (length > 0) {
227 int cacheline_end = ALIGN(gpu_offset + 1, 64);
228 int this_length = min(cacheline_end - gpu_offset, length);
229 int swizzled_gpu_offset = gpu_offset ^ 64;
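/* 64 == 1 << 6, so the XOR above flips address bit 6 (A6); we only get here for pages whose physical address has bit 17 set, per the check at the top of this function. */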
231 if (is_read) {
232 memcpy(cpu_vaddr + cpu_offset,
233 gpu_vaddr + swizzled_gpu_offset,
234 this_length);
235 } else {
236 memcpy(gpu_vaddr + swizzled_gpu_offset,
237 cpu_vaddr + cpu_offset,
238 this_length);
240 cpu_offset += this_length;
241 gpu_offset += this_length;
242 length -= this_length;
245 kunmap_atomic(cpu_vaddr, KM_USER1);
246 kunmap_atomic(gpu_vaddr, KM_USER0);
248 return 0;
 252  * This is the fast shmem pread path, which attempts to copy_to_user directly
 253  * from the backing pages of the object to the user's address space. On a
 254  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
256 static int
257 i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
258 struct drm_i915_gem_pread *args,
259 struct drm_file *file_priv)
261 struct drm_i915_gem_object *obj_priv = obj->driver_private;
262 ssize_t remain;
263 loff_t offset, page_base;
264 char __user *user_data;
265 int page_offset, page_length;
266 int ret;
268 user_data = (char __user *) (uintptr_t) args->data_ptr;
269 remain = args->size;
271 mutex_lock(&dev->struct_mutex);
273 ret = i915_gem_object_get_pages(obj);
274 if (ret != 0)
275 goto fail_unlock;
277 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
278 args->size);
279 if (ret != 0)
280 goto fail_put_pages;
282 obj_priv = obj->driver_private;
283 offset = args->offset;
285 while (remain > 0) {
286 /* Operation in this page
 288          * page_base = page offset within the object
289 * page_offset = offset within page
290 * page_length = bytes to copy for this page
292 page_base = (offset & ~(PAGE_SIZE-1));
293 page_offset = offset & (PAGE_SIZE-1);
294 page_length = remain;
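/* Clamp so a single copy never crosses a page boundary. */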
295 if ((page_offset + remain) > PAGE_SIZE)
296 page_length = PAGE_SIZE - page_offset;
298 ret = fast_shmem_read(obj_priv->pages,
299 page_base, page_offset,
300 user_data, page_length);
301 if (ret)
302 goto fail_put_pages;
304 remain -= page_length;
305 user_data += page_length;
306 offset += page_length;
309 fail_put_pages:
310 i915_gem_object_put_pages(obj);
311 fail_unlock:
312 mutex_unlock(&dev->struct_mutex);
314 return ret;
 318  * This is the fallback shmem pread path, which pins the user pages with
 319  * get_user_pages() before taking the struct_mutex, so we
 320  * can copy out of the object's backing pages while holding the struct mutex
 321  * and not take page faults.
323 static int
324 i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
325 struct drm_i915_gem_pread *args,
326 struct drm_file *file_priv)
328 struct drm_i915_gem_object *obj_priv = obj->driver_private;
329 struct mm_struct *mm = current->mm;
330 struct page **user_pages;
331 ssize_t remain;
332 loff_t offset, pinned_pages, i;
333 loff_t first_data_page, last_data_page, num_pages;
334 int shmem_page_index, shmem_page_offset;
335 int data_page_index, data_page_offset;
336 int page_length;
337 int ret;
338 uint64_t data_ptr = args->data_ptr;
339 int do_bit17_swizzling;
341 remain = args->size;
343 /* Pin the user pages containing the data. We can't fault while
344 * holding the struct mutex, yet we want to hold it while
345 * dereferencing the user data.
347 first_data_page = data_ptr / PAGE_SIZE;
348 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
349 num_pages = last_data_page - first_data_page + 1;
351 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
352 if (user_pages == NULL)
353 return -ENOMEM;
355 down_read(&mm->mmap_sem);
356 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
357 num_pages, 1, 0, user_pages, NULL);
358 up_read(&mm->mmap_sem);
359 if (pinned_pages < num_pages) {
360 ret = -EFAULT;
361 goto fail_put_user_pages;
364 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
366 mutex_lock(&dev->struct_mutex);
368 ret = i915_gem_object_get_pages(obj);
369 if (ret != 0)
370 goto fail_unlock;
372 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
373 args->size);
374 if (ret != 0)
375 goto fail_put_pages;
377 obj_priv = obj->driver_private;
378 offset = args->offset;
380 while (remain > 0) {
381 /* Operation in this page
383 * shmem_page_index = page number within shmem file
384 * shmem_page_offset = offset within page in shmem file
385 * data_page_index = page number in get_user_pages return
 386          * data_page_offset = offset within the data_page_index page.
387 * page_length = bytes to copy for this page
389 shmem_page_index = offset / PAGE_SIZE;
390 shmem_page_offset = offset & ~PAGE_MASK;
391 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
392 data_page_offset = data_ptr & ~PAGE_MASK;
394 page_length = remain;
395 if ((shmem_page_offset + page_length) > PAGE_SIZE)
396 page_length = PAGE_SIZE - shmem_page_offset;
397 if ((data_page_offset + page_length) > PAGE_SIZE)
398 page_length = PAGE_SIZE - data_page_offset;
400 if (do_bit17_swizzling) {
401 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
402 shmem_page_offset,
403 user_pages[data_page_index],
404 data_page_offset,
 405                                                       page_length,
                                                              1);
407 } else {
408 ret = slow_shmem_copy(user_pages[data_page_index],
409 data_page_offset,
410 obj_priv->pages[shmem_page_index],
411 shmem_page_offset,
412 page_length);
414 if (ret)
415 goto fail_put_pages;
417 remain -= page_length;
418 data_ptr += page_length;
419 offset += page_length;
422 fail_put_pages:
423 i915_gem_object_put_pages(obj);
424 fail_unlock:
425 mutex_unlock(&dev->struct_mutex);
426 fail_put_user_pages:
427 for (i = 0; i < pinned_pages; i++) {
428 SetPageDirty(user_pages[i]);
429 page_cache_release(user_pages[i]);
431 drm_free_large(user_pages);
433 return ret;
437 * Reads data from the object referenced by handle.
439 * On error, the contents of *data are undefined.
442 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
443 struct drm_file *file_priv)
445 struct drm_i915_gem_pread *args = data;
446 struct drm_gem_object *obj;
447 struct drm_i915_gem_object *obj_priv;
448 int ret;
450 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
451 if (obj == NULL)
452 return -EBADF;
453 obj_priv = obj->driver_private;
455 /* Bounds check source.
457 * XXX: This could use review for overflow issues...
459 if (args->offset > obj->size || args->size > obj->size ||
460 args->offset + args->size > obj->size) {
461 drm_gem_object_unreference(obj);
462 return -EINVAL;
465 if (i915_gem_object_needs_bit17_swizzle(obj)) {
466 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
467 } else {
468 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
469 if (ret != 0)
470 ret = i915_gem_shmem_pread_slow(dev, obj, args,
471 file_priv);
474 drm_gem_object_unreference(obj);
476 return ret;
479 /* This is the fast write path which cannot handle
480 * page faults in the source data
483 static inline int
484 fast_user_write(struct io_mapping *mapping,
485 loff_t page_base, int page_offset,
486 char __user *user_data,
487 int length)
489 char *vaddr_atomic;
490 unsigned long unwritten;
492 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
493 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
494 user_data, length);
495 io_mapping_unmap_atomic(vaddr_atomic);
496 if (unwritten)
497 return -EFAULT;
498 return 0;
501 /* Here's the write path which can sleep for
502 * page faults
505 static inline int
506 slow_kernel_write(struct io_mapping *mapping,
507 loff_t gtt_base, int gtt_offset,
508 struct page *user_page, int user_offset,
509 int length)
511 char *src_vaddr, *dst_vaddr;
512 unsigned long unwritten;
514 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
515 src_vaddr = kmap_atomic(user_page, KM_USER1);
516 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
517 src_vaddr + user_offset,
518 length);
519 kunmap_atomic(src_vaddr, KM_USER1);
520 io_mapping_unmap_atomic(dst_vaddr);
521 if (unwritten)
522 return -EFAULT;
523 return 0;
526 static inline int
527 fast_shmem_write(struct page **pages,
528 loff_t page_base, int page_offset,
529 char __user *data,
530 int length)
532 char __iomem *vaddr;
533 unsigned long unwritten;
535 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
536 if (vaddr == NULL)
537 return -ENOMEM;
538 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
539 kunmap_atomic(vaddr, KM_USER0);
541 if (unwritten)
542 return -EFAULT;
543 return 0;
547 * This is the fast pwrite path, where we copy the data directly from the
548 * user into the GTT, uncached.
550 static int
551 i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
552 struct drm_i915_gem_pwrite *args,
553 struct drm_file *file_priv)
555 struct drm_i915_gem_object *obj_priv = obj->driver_private;
556 drm_i915_private_t *dev_priv = dev->dev_private;
557 ssize_t remain;
558 loff_t offset, page_base;
559 char __user *user_data;
560 int page_offset, page_length;
561 int ret;
563 user_data = (char __user *) (uintptr_t) args->data_ptr;
564 remain = args->size;
565 if (!access_ok(VERIFY_READ, user_data, remain))
566 return -EFAULT;
569 mutex_lock(&dev->struct_mutex);
570 ret = i915_gem_object_pin(obj, 0);
571 if (ret) {
572 mutex_unlock(&dev->struct_mutex);
573 return ret;
575 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
576 if (ret)
577 goto fail;
579 obj_priv = obj->driver_private;
580 offset = obj_priv->gtt_offset + args->offset;
582 while (remain > 0) {
583 /* Operation in this page
585 * page_base = page offset within aperture
586 * page_offset = offset within page
587 * page_length = bytes to copy for this page
589 page_base = (offset & ~(PAGE_SIZE-1));
590 page_offset = offset & (PAGE_SIZE-1);
591 page_length = remain;
592 if ((page_offset + remain) > PAGE_SIZE)
593 page_length = PAGE_SIZE - page_offset;
595 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
596 page_offset, user_data, page_length);
598 /* If we get a fault while copying data, then (presumably) our
599 * source page isn't available. Return the error and we'll
600 * retry in the slow path.
602 if (ret)
603 goto fail;
605 remain -= page_length;
606 user_data += page_length;
607 offset += page_length;
610 fail:
611 i915_gem_object_unpin(obj);
612 mutex_unlock(&dev->struct_mutex);
614 return ret;
618 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
619 * the memory and maps it using kmap_atomic for copying.
621 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
622 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
624 static int
625 i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
626 struct drm_i915_gem_pwrite *args,
627 struct drm_file *file_priv)
629 struct drm_i915_gem_object *obj_priv = obj->driver_private;
630 drm_i915_private_t *dev_priv = dev->dev_private;
631 ssize_t remain;
632 loff_t gtt_page_base, offset;
633 loff_t first_data_page, last_data_page, num_pages;
634 loff_t pinned_pages, i;
635 struct page **user_pages;
636 struct mm_struct *mm = current->mm;
637 int gtt_page_offset, data_page_offset, data_page_index, page_length;
638 int ret;
639 uint64_t data_ptr = args->data_ptr;
641 remain = args->size;
643 /* Pin the user pages containing the data. We can't fault while
644 * holding the struct mutex, and all of the pwrite implementations
645 * want to hold it while dereferencing the user data.
647 first_data_page = data_ptr / PAGE_SIZE;
648 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
649 num_pages = last_data_page - first_data_page + 1;
651 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
652 if (user_pages == NULL)
653 return -ENOMEM;
655 down_read(&mm->mmap_sem);
656 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
657 num_pages, 0, 0, user_pages, NULL);
658 up_read(&mm->mmap_sem);
659 if (pinned_pages < num_pages) {
660 ret = -EFAULT;
661 goto out_unpin_pages;
664 mutex_lock(&dev->struct_mutex);
665 ret = i915_gem_object_pin(obj, 0);
666 if (ret)
667 goto out_unlock;
669 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
670 if (ret)
671 goto out_unpin_object;
673 obj_priv = obj->driver_private;
674 offset = obj_priv->gtt_offset + args->offset;
676 while (remain > 0) {
677 /* Operation in this page
679 * gtt_page_base = page offset within aperture
680 * gtt_page_offset = offset within page in aperture
681 * data_page_index = page number in get_user_pages return
 682          * data_page_offset = offset within the data_page_index page.
683 * page_length = bytes to copy for this page
685 gtt_page_base = offset & PAGE_MASK;
686 gtt_page_offset = offset & ~PAGE_MASK;
687 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
688 data_page_offset = data_ptr & ~PAGE_MASK;
690 page_length = remain;
691 if ((gtt_page_offset + page_length) > PAGE_SIZE)
692 page_length = PAGE_SIZE - gtt_page_offset;
693 if ((data_page_offset + page_length) > PAGE_SIZE)
694 page_length = PAGE_SIZE - data_page_offset;
696 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
697 gtt_page_base, gtt_page_offset,
698 user_pages[data_page_index],
699 data_page_offset,
700 page_length);
 702                 /* If we get a fault while copying data, then (presumably) our
 703                  * source page isn't available. Return the error; there is no
 704                  * further fallback from this path.
706 if (ret)
707 goto out_unpin_object;
709 remain -= page_length;
710 offset += page_length;
711 data_ptr += page_length;
714 out_unpin_object:
715 i915_gem_object_unpin(obj);
716 out_unlock:
717 mutex_unlock(&dev->struct_mutex);
718 out_unpin_pages:
719 for (i = 0; i < pinned_pages; i++)
720 page_cache_release(user_pages[i]);
721 drm_free_large(user_pages);
723 return ret;
727 * This is the fast shmem pwrite path, which attempts to directly
728 * copy_from_user into the kmapped pages backing the object.
730 static int
731 i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
732 struct drm_i915_gem_pwrite *args,
733 struct drm_file *file_priv)
735 struct drm_i915_gem_object *obj_priv = obj->driver_private;
736 ssize_t remain;
737 loff_t offset, page_base;
738 char __user *user_data;
739 int page_offset, page_length;
740 int ret;
742 user_data = (char __user *) (uintptr_t) args->data_ptr;
743 remain = args->size;
745 mutex_lock(&dev->struct_mutex);
747 ret = i915_gem_object_get_pages(obj);
748 if (ret != 0)
749 goto fail_unlock;
751 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
752 if (ret != 0)
753 goto fail_put_pages;
755 obj_priv = obj->driver_private;
756 offset = args->offset;
757 obj_priv->dirty = 1;
759 while (remain > 0) {
760 /* Operation in this page
 762          * page_base = page offset within the object
763 * page_offset = offset within page
764 * page_length = bytes to copy for this page
766 page_base = (offset & ~(PAGE_SIZE-1));
767 page_offset = offset & (PAGE_SIZE-1);
768 page_length = remain;
769 if ((page_offset + remain) > PAGE_SIZE)
770 page_length = PAGE_SIZE - page_offset;
772 ret = fast_shmem_write(obj_priv->pages,
773 page_base, page_offset,
774 user_data, page_length);
775 if (ret)
776 goto fail_put_pages;
778 remain -= page_length;
779 user_data += page_length;
780 offset += page_length;
783 fail_put_pages:
784 i915_gem_object_put_pages(obj);
785 fail_unlock:
786 mutex_unlock(&dev->struct_mutex);
788 return ret;
792 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
793 * the memory and maps it using kmap_atomic for copying.
795 * This avoids taking mmap_sem for faulting on the user's address while the
796 * struct_mutex is held.
798 static int
799 i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
800 struct drm_i915_gem_pwrite *args,
801 struct drm_file *file_priv)
803 struct drm_i915_gem_object *obj_priv = obj->driver_private;
804 struct mm_struct *mm = current->mm;
805 struct page **user_pages;
806 ssize_t remain;
807 loff_t offset, pinned_pages, i;
808 loff_t first_data_page, last_data_page, num_pages;
809 int shmem_page_index, shmem_page_offset;
810 int data_page_index, data_page_offset;
811 int page_length;
812 int ret;
813 uint64_t data_ptr = args->data_ptr;
814 int do_bit17_swizzling;
816 remain = args->size;
818 /* Pin the user pages containing the data. We can't fault while
819 * holding the struct mutex, and all of the pwrite implementations
820 * want to hold it while dereferencing the user data.
822 first_data_page = data_ptr / PAGE_SIZE;
823 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
824 num_pages = last_data_page - first_data_page + 1;
826 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
827 if (user_pages == NULL)
828 return -ENOMEM;
830 down_read(&mm->mmap_sem);
831 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
832 num_pages, 0, 0, user_pages, NULL);
833 up_read(&mm->mmap_sem);
834 if (pinned_pages < num_pages) {
835 ret = -EFAULT;
836 goto fail_put_user_pages;
839 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
841 mutex_lock(&dev->struct_mutex);
843 ret = i915_gem_object_get_pages(obj);
844 if (ret != 0)
845 goto fail_unlock;
847 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
848 if (ret != 0)
849 goto fail_put_pages;
851 obj_priv = obj->driver_private;
852 offset = args->offset;
853 obj_priv->dirty = 1;
855 while (remain > 0) {
856 /* Operation in this page
858 * shmem_page_index = page number within shmem file
859 * shmem_page_offset = offset within page in shmem file
860 * data_page_index = page number in get_user_pages return
 861          * data_page_offset = offset within the data_page_index page.
862 * page_length = bytes to copy for this page
864 shmem_page_index = offset / PAGE_SIZE;
865 shmem_page_offset = offset & ~PAGE_MASK;
866 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
867 data_page_offset = data_ptr & ~PAGE_MASK;
869 page_length = remain;
870 if ((shmem_page_offset + page_length) > PAGE_SIZE)
871 page_length = PAGE_SIZE - shmem_page_offset;
872 if ((data_page_offset + page_length) > PAGE_SIZE)
873 page_length = PAGE_SIZE - data_page_offset;
875 if (do_bit17_swizzling) {
876 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
877 shmem_page_offset,
878 user_pages[data_page_index],
879 data_page_offset,
 880                                                       page_length,
                                                              0);
882 } else {
883 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
884 shmem_page_offset,
885 user_pages[data_page_index],
886 data_page_offset,
887 page_length);
889 if (ret)
890 goto fail_put_pages;
892 remain -= page_length;
893 data_ptr += page_length;
894 offset += page_length;
897 fail_put_pages:
898 i915_gem_object_put_pages(obj);
899 fail_unlock:
900 mutex_unlock(&dev->struct_mutex);
901 fail_put_user_pages:
902 for (i = 0; i < pinned_pages; i++)
903 page_cache_release(user_pages[i]);
904 drm_free_large(user_pages);
906 return ret;
910 * Writes data to the object referenced by handle.
912 * On error, the contents of the buffer that were to be modified are undefined.
915 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
916 struct drm_file *file_priv)
918 struct drm_i915_gem_pwrite *args = data;
919 struct drm_gem_object *obj;
920 struct drm_i915_gem_object *obj_priv;
921 int ret = 0;
923 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
924 if (obj == NULL)
925 return -EBADF;
926 obj_priv = obj->driver_private;
928 /* Bounds check destination.
930 * XXX: This could use review for overflow issues...
932 if (args->offset > obj->size || args->size > obj->size ||
933 args->offset + args->size > obj->size) {
934 drm_gem_object_unreference(obj);
935 return -EINVAL;
938 /* We can only do the GTT pwrite on untiled buffers, as otherwise
939 * it would end up going through the fenced access, and we'll get
940 * different detiling behavior between reading and writing.
941 * pread/pwrite currently are reading and writing from the CPU
942 * perspective, requiring manual detiling by the client.
944 if (obj_priv->phys_obj)
945 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
946 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
947 dev->gtt_total != 0) {
948 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
949 if (ret == -EFAULT) {
950 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
951 file_priv);
953 } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
954 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
955 } else {
956 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
957 if (ret == -EFAULT) {
958 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
959 file_priv);
963 #if WATCH_PWRITE
964 if (ret)
965 DRM_INFO("pwrite failed %d\n", ret);
966 #endif
968 drm_gem_object_unreference(obj);
970 return ret;
974 * Called when user space prepares to use an object with the CPU, either
975 * through the mmap ioctl's mapping or a GTT mapping.
978 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
979 struct drm_file *file_priv)
981 struct drm_i915_private *dev_priv = dev->dev_private;
982 struct drm_i915_gem_set_domain *args = data;
983 struct drm_gem_object *obj;
984 uint32_t read_domains = args->read_domains;
985 uint32_t write_domain = args->write_domain;
986 int ret;
988 if (!(dev->driver->driver_features & DRIVER_GEM))
989 return -ENODEV;
991 /* Only handle setting domains to types used by the CPU. */
992 if (write_domain & I915_GEM_GPU_DOMAINS)
993 return -EINVAL;
995 if (read_domains & I915_GEM_GPU_DOMAINS)
996 return -EINVAL;
998 /* Having something in the write domain implies it's in the read
999 * domain, and only that read domain. Enforce that in the request.
1001 if (write_domain != 0 && read_domains != write_domain)
1002 return -EINVAL;
1004 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1005 if (obj == NULL)
1006 return -EBADF;
1008 mutex_lock(&dev->struct_mutex);
1009 #if WATCH_BUF
1010 DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
1011 obj, obj->size, read_domains, write_domain);
1012 #endif
1013 if (read_domains & I915_GEM_DOMAIN_GTT) {
1014 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1016 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1018 /* Update the LRU on the fence for the CPU access that's
1019 * about to occur.
1021 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1022 list_move_tail(&obj_priv->fence_list,
1023 &dev_priv->mm.fence_list);
1026 /* Silently promote "you're not bound, there was nothing to do"
1027 * to success, since the client was just asking us to
1028 * make sure everything was done.
1030 if (ret == -EINVAL)
1031 ret = 0;
1032 } else {
1033 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1036 drm_gem_object_unreference(obj);
1037 mutex_unlock(&dev->struct_mutex);
1038 return ret;
1042 * Called when user space has done writes to this buffer
1045 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1046 struct drm_file *file_priv)
1048 struct drm_i915_gem_sw_finish *args = data;
1049 struct drm_gem_object *obj;
1050 struct drm_i915_gem_object *obj_priv;
1051 int ret = 0;
1053 if (!(dev->driver->driver_features & DRIVER_GEM))
1054 return -ENODEV;
1056 mutex_lock(&dev->struct_mutex);
1057 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1058 if (obj == NULL) {
1059 mutex_unlock(&dev->struct_mutex);
1060 return -EBADF;
1063 #if WATCH_BUF
1064 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1065 __func__, args->handle, obj, obj->size);
1066 #endif
1067 obj_priv = obj->driver_private;
1069 /* Pinned buffers may be scanout, so flush the cache */
1070 if (obj_priv->pin_count)
1071 i915_gem_object_flush_cpu_write_domain(obj);
1073 drm_gem_object_unreference(obj);
1074 mutex_unlock(&dev->struct_mutex);
1075 return ret;
1079 * Maps the contents of an object, returning the address it is mapped
1080 * into.
1082 * While the mapping holds a reference on the contents of the object, it doesn't
1083 * imply a ref on the object itself.
1086 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1087 struct drm_file *file_priv)
1089 struct drm_i915_gem_mmap *args = data;
1090 struct drm_gem_object *obj;
1091 loff_t offset;
1092 unsigned long addr;
1094 if (!(dev->driver->driver_features & DRIVER_GEM))
1095 return -ENODEV;
1097 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1098 if (obj == NULL)
1099 return -EBADF;
1101 offset = args->offset;
1103 down_write(&current->mm->mmap_sem);
1104 addr = do_mmap(obj->filp, 0, args->size,
1105 PROT_READ | PROT_WRITE, MAP_SHARED,
1106 args->offset);
1107 up_write(&current->mm->mmap_sem);
1108 mutex_lock(&dev->struct_mutex);
1109 drm_gem_object_unreference(obj);
1110 mutex_unlock(&dev->struct_mutex);
1111 if (IS_ERR((void *)addr))
1112 return addr;
1114 args->addr_ptr = (uint64_t) addr;
1116 return 0;
1120 * i915_gem_fault - fault a page into the GTT
1121 * vma: VMA in question
1122 * vmf: fault info
 1124  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1125 * from userspace. The fault handler takes care of binding the object to
1126 * the GTT (if needed), allocating and programming a fence register (again,
1127 * only if needed based on whether the old reg is still valid or the object
1128 * is tiled) and inserting a new PTE into the faulting process.
1130 * Note that the faulting process may involve evicting existing objects
1131 * from the GTT and/or fence registers to make room. So performance may
1132 * suffer if the GTT working set is large or there are few fence registers
1133 * left.
1135 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1137 struct drm_gem_object *obj = vma->vm_private_data;
1138 struct drm_device *dev = obj->dev;
1139 struct drm_i915_private *dev_priv = dev->dev_private;
1140 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1141 pgoff_t page_offset;
1142 unsigned long pfn;
1143 int ret = 0;
1144 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1146 /* We don't use vmf->pgoff since that has the fake offset */
1147 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1148 PAGE_SHIFT;
1150 /* Now bind it into the GTT if needed */
1151 mutex_lock(&dev->struct_mutex);
1152 if (!obj_priv->gtt_space) {
1153 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1154 if (ret) {
1155 mutex_unlock(&dev->struct_mutex);
1156 return VM_FAULT_SIGBUS;
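/* Freshly bound during the fault: put the object on the inactive LRU right away so i915_gem_evict_something() can account for it. */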
1158 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1160 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1161 if (ret) {
1162 mutex_unlock(&dev->struct_mutex);
1163 return VM_FAULT_SIGBUS;
1167 /* Need a new fence register? */
1168 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1169 ret = i915_gem_object_get_fence_reg(obj);
1170 if (ret) {
1171 mutex_unlock(&dev->struct_mutex);
1172 return VM_FAULT_SIGBUS;
1176 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1177 page_offset;
1179 /* Finally, remap it using the new GTT offset */
1180 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1182 mutex_unlock(&dev->struct_mutex);
1184 switch (ret) {
1185 case -ENOMEM:
1186 case -EAGAIN:
1187 return VM_FAULT_OOM;
1188 case -EFAULT:
1189 case -EINVAL:
1190 return VM_FAULT_SIGBUS;
1191 default:
1192 return VM_FAULT_NOPAGE;
1197 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1198 * @obj: obj in question
1200 * GEM memory mapping works by handing back to userspace a fake mmap offset
1201 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1202 * up the object based on the offset and sets up the various memory mapping
1203 * structures.
1205 * This routine allocates and attaches a fake offset for @obj.
1207 static int
1208 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1210 struct drm_device *dev = obj->dev;
1211 struct drm_gem_mm *mm = dev->mm_private;
1212 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1213 struct drm_map_list *list;
1214 struct drm_local_map *map;
1215 int ret = 0;
1217 /* Set the object up for mmap'ing */
1218 list = &obj->map_list;
1219 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1220 if (!list->map)
1221 return -ENOMEM;
1223 map = list->map;
1224 map->type = _DRM_GEM;
1225 map->size = obj->size;
1226 map->handle = obj;
1228 /* Get a DRM GEM mmap offset allocated... */
1229 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1230 obj->size / PAGE_SIZE, 0, 0);
1231 if (!list->file_offset_node) {
1232 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1233 ret = -ENOMEM;
1234 goto out_free_list;
1237 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1238 obj->size / PAGE_SIZE, 0);
1239 if (!list->file_offset_node) {
1240 ret = -ENOMEM;
1241 goto out_free_list;
1244 list->hash.key = list->file_offset_node->start;
1245 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1246 DRM_ERROR("failed to add to map hash\n");
1247 goto out_free_mm;
1250 /* By now we should be all set, any drm_mmap request on the offset
1251 * below will get to our mmap & fault handler */
1252 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1254 return 0;
1256 out_free_mm:
1257 drm_mm_put_block(list->file_offset_node);
1258 out_free_list:
1259 kfree(list->map);
1261 return ret;
1265 * i915_gem_release_mmap - remove physical page mappings
1266 * @obj: obj in question
 1268  * Preserve the reservation of the mmapping with the DRM core code, but
1269 * relinquish ownership of the pages back to the system.
1271 * It is vital that we remove the page mapping if we have mapped a tiled
1272 * object through the GTT and then lose the fence register due to
1273 * resource pressure. Similarly if the object has been moved out of the
 1274  * aperture, then pages mapped into userspace must be revoked. Removing the
1275 * mapping will then trigger a page fault on the next user access, allowing
1276 * fixup by i915_gem_fault().
1278 void
1279 i915_gem_release_mmap(struct drm_gem_object *obj)
1281 struct drm_device *dev = obj->dev;
1282 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1284 if (dev->dev_mapping)
1285 unmap_mapping_range(dev->dev_mapping,
1286 obj_priv->mmap_offset, obj->size, 1);
1289 static void
1290 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1292 struct drm_device *dev = obj->dev;
1293 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1294 struct drm_gem_mm *mm = dev->mm_private;
1295 struct drm_map_list *list;
1297 list = &obj->map_list;
1298 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1300 if (list->file_offset_node) {
1301 drm_mm_put_block(list->file_offset_node);
1302 list->file_offset_node = NULL;
1305 if (list->map) {
1306 kfree(list->map);
1307 list->map = NULL;
1310 obj_priv->mmap_offset = 0;
1314 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1315 * @obj: object to check
1317 * Return the required GTT alignment for an object, taking into account
1318 * potential fence register mapping if needed.
1320 static uint32_t
1321 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1323 struct drm_device *dev = obj->dev;
1324 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1325 int start, i;
1328 * Minimum alignment is 4k (GTT page size), but might be greater
1329 * if a fence register is needed for the object.
1331 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1332 return 4096;
1335 * Previous chips need to be aligned to the size of the smallest
1336 * fence register that can contain the object.
1338 if (IS_I9XX(dev))
1339 start = 1024*1024;
1340 else
1341 start = 512*1024;
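/* Double the candidate size until it covers the object; the loop body below is intentionally empty. */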
 1343         for (i = start; i < obj->size; i <<= 1)
                 ;
1346 return i;
1350 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1351 * @dev: DRM device
1352 * @data: GTT mapping ioctl data
1353 * @file_priv: GEM object info
1355 * Simply returns the fake offset to userspace so it can mmap it.
1356 * The mmap call will end up in drm_gem_mmap(), which will set things
1357 * up so we can get faults in the handler above.
1359 * The fault handler will take care of binding the object into the GTT
1360 * (since it may have been evicted to make room for something), allocating
1361 * a fence register, and mapping the appropriate aperture address into
1362 * userspace.
1365 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1366 struct drm_file *file_priv)
1368 struct drm_i915_gem_mmap_gtt *args = data;
1369 struct drm_i915_private *dev_priv = dev->dev_private;
1370 struct drm_gem_object *obj;
1371 struct drm_i915_gem_object *obj_priv;
1372 int ret;
1374 if (!(dev->driver->driver_features & DRIVER_GEM))
1375 return -ENODEV;
1377 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1378 if (obj == NULL)
1379 return -EBADF;
1381 mutex_lock(&dev->struct_mutex);
1383 obj_priv = obj->driver_private;
1385 if (!obj_priv->mmap_offset) {
1386 ret = i915_gem_create_mmap_offset(obj);
1387 if (ret) {
1388 drm_gem_object_unreference(obj);
1389 mutex_unlock(&dev->struct_mutex);
1390 return ret;
1394 args->offset = obj_priv->mmap_offset;
1396 obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
1398 /* Make sure the alignment is correct for fence regs etc */
1399 if (obj_priv->agp_mem &&
1400 (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
1401 drm_gem_object_unreference(obj);
1402 mutex_unlock(&dev->struct_mutex);
1403 return -EINVAL;
1407 * Pull it into the GTT so that we have a page list (makes the
1408 * initial fault faster and any subsequent flushing possible).
1410 if (!obj_priv->agp_mem) {
1411 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1412 if (ret) {
1413 drm_gem_object_unreference(obj);
1414 mutex_unlock(&dev->struct_mutex);
1415 return ret;
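/* As in the fault handler, a newly bound object goes straight onto the inactive list. */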
1417 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1420 drm_gem_object_unreference(obj);
1421 mutex_unlock(&dev->struct_mutex);
1423 return 0;
1426 void
1427 i915_gem_object_put_pages(struct drm_gem_object *obj)
1429 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1430 int page_count = obj->size / PAGE_SIZE;
1431 int i;
1433 BUG_ON(obj_priv->pages_refcount == 0);
1435 if (--obj_priv->pages_refcount != 0)
1436 return;
1438 if (obj_priv->tiling_mode != I915_TILING_NONE)
1439 i915_gem_object_save_bit_17_swizzle(obj);
1441 for (i = 0; i < page_count; i++)
1442 if (obj_priv->pages[i] != NULL) {
1443 if (obj_priv->dirty)
1444 set_page_dirty(obj_priv->pages[i]);
1445 mark_page_accessed(obj_priv->pages[i]);
1446 page_cache_release(obj_priv->pages[i]);
1448 obj_priv->dirty = 0;
1450 drm_free_large(obj_priv->pages);
1451 obj_priv->pages = NULL;
1454 static void
1455 i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1457 struct drm_device *dev = obj->dev;
1458 drm_i915_private_t *dev_priv = dev->dev_private;
1459 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1461 /* Add a reference if we're newly entering the active list. */
1462 if (!obj_priv->active) {
1463 drm_gem_object_reference(obj);
1464 obj_priv->active = 1;
1466 /* Move from whatever list we were on to the tail of execution. */
1467 spin_lock(&dev_priv->mm.active_list_lock);
1468 list_move_tail(&obj_priv->list,
1469 &dev_priv->mm.active_list);
1470 spin_unlock(&dev_priv->mm.active_list_lock);
1471 obj_priv->last_rendering_seqno = seqno;
1474 static void
1475 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1477 struct drm_device *dev = obj->dev;
1478 drm_i915_private_t *dev_priv = dev->dev_private;
1479 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1481 BUG_ON(!obj_priv->active);
1482 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1483 obj_priv->last_rendering_seqno = 0;
1486 static void
1487 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1489 struct drm_device *dev = obj->dev;
1490 drm_i915_private_t *dev_priv = dev->dev_private;
1491 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1493 i915_verify_inactive(dev, __FILE__, __LINE__);
1494 if (obj_priv->pin_count != 0)
1495 list_del_init(&obj_priv->list);
1496 else
1497 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1499 obj_priv->last_rendering_seqno = 0;
1500 if (obj_priv->active) {
1501 obj_priv->active = 0;
1502 drm_gem_object_unreference(obj);
1504 i915_verify_inactive(dev, __FILE__, __LINE__);
1508 * Creates a new sequence number, emitting a write of it to the status page
1509 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1511 * Must be called with struct_lock held.
1513 * Returned sequence numbers are nonzero on success.
1515 static uint32_t
1516 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1517 uint32_t flush_domains)
1519 drm_i915_private_t *dev_priv = dev->dev_private;
1520 struct drm_i915_file_private *i915_file_priv = NULL;
1521 struct drm_i915_gem_request *request;
1522 uint32_t seqno;
1523 int was_empty;
1524 RING_LOCALS;
1526 if (file_priv != NULL)
1527 i915_file_priv = file_priv->driver_priv;
1529 request = kzalloc(sizeof(*request), GFP_KERNEL);
1530 if (request == NULL)
1531 return 0;
1533 /* Grab the seqno we're going to make this request be, and bump the
1534 * next (skipping 0 so it can be the reserved no-seqno value).
1536 seqno = dev_priv->mm.next_gem_seqno;
1537 dev_priv->mm.next_gem_seqno++;
1538 if (dev_priv->mm.next_gem_seqno == 0)
1539 dev_priv->mm.next_gem_seqno++;
1541 BEGIN_LP_RING(4);
1542 OUT_RING(MI_STORE_DWORD_INDEX);
1543 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1544 OUT_RING(seqno);
1546 OUT_RING(MI_USER_INTERRUPT);
1547 ADVANCE_LP_RING();
1549 DRM_DEBUG("%d\n", seqno);
1551 request->seqno = seqno;
1552 request->emitted_jiffies = jiffies;
1553 was_empty = list_empty(&dev_priv->mm.request_list);
1554 list_add_tail(&request->list, &dev_priv->mm.request_list);
1555 if (i915_file_priv) {
1556 list_add_tail(&request->client_list,
1557 &i915_file_priv->mm.request_list);
1558 } else {
1559 INIT_LIST_HEAD(&request->client_list);
1562 /* Associate any objects on the flushing list matching the write
1563 * domain we're flushing with our flush.
1565 if (flush_domains != 0) {
1566 struct drm_i915_gem_object *obj_priv, *next;
1568 list_for_each_entry_safe(obj_priv, next,
1569 &dev_priv->mm.flushing_list, list) {
1570 struct drm_gem_object *obj = obj_priv->obj;
1572 if ((obj->write_domain & flush_domains) ==
1573 obj->write_domain) {
1574 obj->write_domain = 0;
1575 i915_gem_object_move_to_active(obj, seqno);
1581 if (was_empty && !dev_priv->mm.suspended)
1582 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1583 return seqno;
1587 * Command execution barrier
1589 * Ensures that all commands in the ring are finished
1590 * before signalling the CPU
1592 static uint32_t
1593 i915_retire_commands(struct drm_device *dev)
1595 drm_i915_private_t *dev_priv = dev->dev_private;
1596 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1597 uint32_t flush_domains = 0;
1598 RING_LOCALS;
1600 /* The sampler always gets flushed on i965 (sigh) */
1601 if (IS_I965G(dev))
1602 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1603 BEGIN_LP_RING(2);
1604 OUT_RING(cmd);
1605 OUT_RING(0); /* noop */
1606 ADVANCE_LP_RING();
1607 return flush_domains;
1611 * Moves buffers associated only with the given active seqno from the active
1612 * to inactive list, potentially freeing them.
1614 static void
1615 i915_gem_retire_request(struct drm_device *dev,
1616 struct drm_i915_gem_request *request)
1618 drm_i915_private_t *dev_priv = dev->dev_private;
1620 /* Move any buffers on the active list that are no longer referenced
1621 * by the ringbuffer to the flushing/inactive lists as appropriate.
1623 spin_lock(&dev_priv->mm.active_list_lock);
1624 while (!list_empty(&dev_priv->mm.active_list)) {
1625 struct drm_gem_object *obj;
1626 struct drm_i915_gem_object *obj_priv;
1628 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1629 struct drm_i915_gem_object,
1630 list);
1631 obj = obj_priv->obj;
1633 /* If the seqno being retired doesn't match the oldest in the
1634 * list, then the oldest in the list must still be newer than
1635 * this seqno.
1637 if (obj_priv->last_rendering_seqno != request->seqno)
1638 goto out;
1640 #if WATCH_LRU
1641 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1642 __func__, request->seqno, obj);
1643 #endif
1645 if (obj->write_domain != 0)
1646 i915_gem_object_move_to_flushing(obj);
1647 else {
1648 /* Take a reference on the object so it won't be
1649 * freed while the spinlock is held. The list
1650 * protection for this spinlock is safe when breaking
1651 * the lock like this since the next thing we do
1652 * is just get the head of the list again.
1654 drm_gem_object_reference(obj);
1655 i915_gem_object_move_to_inactive(obj);
1656 spin_unlock(&dev_priv->mm.active_list_lock);
1657 drm_gem_object_unreference(obj);
1658 spin_lock(&dev_priv->mm.active_list_lock);
1661 out:
1662 spin_unlock(&dev_priv->mm.active_list_lock);
1666 * Returns true if seq1 is later than seq2.
1668 static int
1669 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1671 return (int32_t)(seq1 - seq2) >= 0;
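/* The signed difference makes this robust against seqno wraparound: e.g. seq1 = 2, seq2 = 0xfffffffe gives (int32_t)4 >= 0, so 2 is treated as later. */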
1674 uint32_t
1675 i915_get_gem_seqno(struct drm_device *dev)
1677 drm_i915_private_t *dev_priv = dev->dev_private;
1679 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
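/* The current seqno is read back from the hardware status page slot that i915_add_request() fills with MI_STORE_DWORD_INDEX. */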
1683 * This function clears the request list as sequence numbers are passed.
1685 void
1686 i915_gem_retire_requests(struct drm_device *dev)
1688 drm_i915_private_t *dev_priv = dev->dev_private;
1689 uint32_t seqno;
1691 if (!dev_priv->hw_status_page)
1692 return;
1694 seqno = i915_get_gem_seqno(dev);
1696 while (!list_empty(&dev_priv->mm.request_list)) {
1697 struct drm_i915_gem_request *request;
1698 uint32_t retiring_seqno;
1700 request = list_first_entry(&dev_priv->mm.request_list,
1701 struct drm_i915_gem_request,
1702 list);
1703 retiring_seqno = request->seqno;
1705 if (i915_seqno_passed(seqno, retiring_seqno) ||
1706 dev_priv->mm.wedged) {
1707 i915_gem_retire_request(dev, request);
1709 list_del(&request->list);
1710 list_del(&request->client_list);
1711 kfree(request);
1712 } else
1713 break;
1717 void
1718 i915_gem_retire_work_handler(struct work_struct *work)
1720 drm_i915_private_t *dev_priv;
1721 struct drm_device *dev;
1723 dev_priv = container_of(work, drm_i915_private_t,
1724 mm.retire_work.work);
1725 dev = dev_priv->dev;
1727 mutex_lock(&dev->struct_mutex);
1728 i915_gem_retire_requests(dev);
1729 if (!dev_priv->mm.suspended &&
1730 !list_empty(&dev_priv->mm.request_list))
1731 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1732 mutex_unlock(&dev->struct_mutex);
1736 * Waits for a sequence number to be signaled, and cleans up the
1737 * request and object lists appropriately for that event.
1739 static int
1740 i915_wait_request(struct drm_device *dev, uint32_t seqno)
1742 drm_i915_private_t *dev_priv = dev->dev_private;
1743 u32 ier;
1744 int ret = 0;
1746 BUG_ON(seqno == 0);
1748 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1749 if (IS_IGDNG(dev))
1750 ier = I915_READ(DEIER) | I915_READ(GTIER);
1751 else
1752 ier = I915_READ(IER);
1753 if (!ier) {
1754 DRM_ERROR("something (likely vbetool) disabled "
1755 "interrupts, re-enabling\n");
1756 i915_driver_irq_preinstall(dev);
1757 i915_driver_irq_postinstall(dev);
1760 dev_priv->mm.waiting_gem_seqno = seqno;
1761 i915_user_irq_get(dev);
1762 ret = wait_event_interruptible(dev_priv->irq_queue,
1763 i915_seqno_passed(i915_get_gem_seqno(dev),
1764 seqno) ||
1765 dev_priv->mm.wedged);
1766 i915_user_irq_put(dev);
1767 dev_priv->mm.waiting_gem_seqno = 0;
1769 if (dev_priv->mm.wedged)
1770 ret = -EIO;
1772 if (ret && ret != -ERESTARTSYS)
1773 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1774 __func__, ret, seqno, i915_get_gem_seqno(dev));
1776 /* Directly dispatch request retiring. While we have the work queue
1777 * to handle this, the waiter on a request often wants an associated
1778 * buffer to have made it to the inactive list, and we would need
1779 * a separate wait queue to handle that.
1781 if (ret == 0)
1782 i915_gem_retire_requests(dev);
1784 return ret;
1787 static void
1788 i915_gem_flush(struct drm_device *dev,
1789 uint32_t invalidate_domains,
1790 uint32_t flush_domains)
1792 drm_i915_private_t *dev_priv = dev->dev_private;
1793 uint32_t cmd;
1794 RING_LOCALS;
1796 #if WATCH_EXEC
1797 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1798 invalidate_domains, flush_domains);
1799 #endif
1801 if (flush_domains & I915_GEM_DOMAIN_CPU)
1802 drm_agp_chipset_flush(dev);
1804 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
1806 * read/write caches:
1808 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1809 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1810 * also flushed at 2d versus 3d pipeline switches.
1812 * read-only caches:
1814 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1815 * MI_READ_FLUSH is set, and is always flushed on 965.
1817 * I915_GEM_DOMAIN_COMMAND may not exist?
1819 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1820 * invalidated when MI_EXE_FLUSH is set.
1822 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1823 * invalidated with every MI_FLUSH.
1825 * TLBs:
1827 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
 1828          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
1829 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1830 * are flushed at any MI_FLUSH.
1833 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1834 if ((invalidate_domains|flush_domains) &
1835 I915_GEM_DOMAIN_RENDER)
1836 cmd &= ~MI_NO_WRITE_FLUSH;
1837 if (!IS_I965G(dev)) {
1839 * On the 965, the sampler cache always gets flushed
1840 * and this bit is reserved.
1842 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1843 cmd |= MI_READ_FLUSH;
1845 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1846 cmd |= MI_EXE_FLUSH;
1848 #if WATCH_EXEC
1849 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1850 #endif
1851 BEGIN_LP_RING(2);
1852 OUT_RING(cmd);
1853 OUT_RING(0); /* noop */
1854 ADVANCE_LP_RING();
1859 * Ensures that all rendering to the object has completed and the object is
1860 * safe to unbind from the GTT or access from the CPU.
1862 static int
1863 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1865 struct drm_device *dev = obj->dev;
1866 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1867 int ret;
1869 /* This function only exists to support waiting for existing rendering,
1870 * not for emitting required flushes.
1872 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
1874 /* If there is rendering queued on the buffer being evicted, wait for
1875 * it.
1877 if (obj_priv->active) {
1878 #if WATCH_BUF
1879 DRM_INFO("%s: object %p wait for seqno %08x\n",
1880 __func__, obj, obj_priv->last_rendering_seqno);
1881 #endif
1882 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1883 if (ret != 0)
1884 return ret;
1887 return 0;
1891 * Unbinds an object from the GTT aperture.
1894 i915_gem_object_unbind(struct drm_gem_object *obj)
1896 struct drm_device *dev = obj->dev;
1897 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1898 int ret = 0;
1900 #if WATCH_BUF
1901 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1902 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1903 #endif
1904 if (obj_priv->gtt_space == NULL)
1905 return 0;
1907 if (obj_priv->pin_count != 0) {
1908 DRM_ERROR("Attempting to unbind pinned buffer\n");
1909 return -EINVAL;
1912 /* Move the object to the CPU domain to ensure that
1913 * any possible CPU writes while it's not in the GTT
1914 * are flushed when we go to remap it. This will
1915 * also ensure that all pending GPU writes are finished
1916 * before we unbind.
1918 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1919 if (ret) {
1920 if (ret != -ERESTARTSYS)
1921 DRM_ERROR("set_domain failed: %d\n", ret);
1922 return ret;
1925 if (obj_priv->agp_mem != NULL) {
1926 drm_unbind_agp(obj_priv->agp_mem);
1927 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1928 obj_priv->agp_mem = NULL;
1931 BUG_ON(obj_priv->active);
1933 /* blow away mappings if mapped through GTT */
1934 i915_gem_release_mmap(obj);
1936 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1937 i915_gem_clear_fence_reg(obj);
1939 i915_gem_object_put_pages(obj);
1941 if (obj_priv->gtt_space) {
1942 atomic_dec(&dev->gtt_count);
1943 atomic_sub(obj->size, &dev->gtt_memory);
1945 drm_mm_put_block(obj_priv->gtt_space);
1946 obj_priv->gtt_space = NULL;
1949 /* Remove ourselves from the LRU list if present. */
1950 if (!list_empty(&obj_priv->list))
1951 list_del_init(&obj_priv->list);
1953 return 0;
1956 static int
1957 i915_gem_evict_something(struct drm_device *dev)
1959 drm_i915_private_t *dev_priv = dev->dev_private;
1960 struct drm_gem_object *obj;
1961 struct drm_i915_gem_object *obj_priv;
1962 int ret = 0;
1964 for (;;) {
1965 /* If there's an inactive buffer available now, grab it
1966 * and be done.
1968 if (!list_empty(&dev_priv->mm.inactive_list)) {
1969 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1970 struct drm_i915_gem_object,
1971 list);
1972 obj = obj_priv->obj;
1973 BUG_ON(obj_priv->pin_count != 0);
1974 #if WATCH_LRU
1975 DRM_INFO("%s: evicting %p\n", __func__, obj);
1976 #endif
1977 BUG_ON(obj_priv->active);
1979 /* Wait on the rendering and unbind the buffer. */
1980 ret = i915_gem_object_unbind(obj);
1981 break;
1984 /* If we didn't get anything, but the ring is still processing
1985 * things, wait for one of those things to finish and hopefully
1986 * leave us a buffer to evict.
1988 if (!list_empty(&dev_priv->mm.request_list)) {
1989 struct drm_i915_gem_request *request;
1991 request = list_first_entry(&dev_priv->mm.request_list,
1992 struct drm_i915_gem_request,
1993 list);
1995 ret = i915_wait_request(dev, request->seqno);
1996 if (ret)
1997 break;
1999 /* if waiting caused an object to become inactive,
2000 * then loop around and wait for it. Otherwise, we
2001 * assume that waiting freed and unbound something,
2002 * so there should now be some space in the GTT
2004 if (!list_empty(&dev_priv->mm.inactive_list))
2005 continue;
2006 break;
2009 /* If we didn't have anything on the request list but there
2010 * are buffers awaiting a flush, emit one and try again.
2011 * When we wait on it, those buffers waiting for that flush
2012 * will get moved to inactive.
2014 if (!list_empty(&dev_priv->mm.flushing_list)) {
2015 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2016 struct drm_i915_gem_object,
2017 list);
2018 obj = obj_priv->obj;
2020 i915_gem_flush(dev,
2021 obj->write_domain,
2022 obj->write_domain);
2023 i915_add_request(dev, NULL, obj->write_domain);
2025 obj = NULL;
2026 continue;
2029 DRM_ERROR("inactive empty %d request empty %d "
2030 "flushing empty %d\n",
2031 list_empty(&dev_priv->mm.inactive_list),
2032 list_empty(&dev_priv->mm.request_list),
2033 list_empty(&dev_priv->mm.flushing_list));
2034 /* If we didn't do any of the above, there's nothing to be done
2035 * and we just can't fit it in.
2037 return -ENOSPC;
2039 return ret;
2042 static int
2043 i915_gem_evict_everything(struct drm_device *dev)
2045 int ret;
2047 for (;;) {
2048 ret = i915_gem_evict_something(dev);
2049 if (ret != 0)
2050 break;
2052 if (ret == -ENOSPC)
2053 return 0;
2054 return ret;
2058 i915_gem_object_get_pages(struct drm_gem_object *obj)
2060 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2061 int page_count, i;
2062 struct address_space *mapping;
2063 struct inode *inode;
2064 struct page *page;
2065 int ret;
2067 if (obj_priv->pages_refcount++ != 0)
2068 return 0;
2070 /* Get the list of pages out of our struct file. They'll be pinned
2071 * at this point until we release them.
2073 page_count = obj->size / PAGE_SIZE;
2074 BUG_ON(obj_priv->pages != NULL);
2075 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2076 if (obj_priv->pages == NULL) {
2077 DRM_ERROR("Faled to allocate page list\n");
2078 obj_priv->pages_refcount--;
2079 return -ENOMEM;
2082 inode = obj->filp->f_path.dentry->d_inode;
2083 mapping = inode->i_mapping;
2084 for (i = 0; i < page_count; i++) {
2085 page = read_mapping_page(mapping, i, NULL);
2086 if (IS_ERR(page)) {
2087 ret = PTR_ERR(page);
2088 DRM_ERROR("read_mapping_page failed: %d\n", ret);
2089 i915_gem_object_put_pages(obj);
2090 return ret;
2092 obj_priv->pages[i] = page;
2095 if (obj_priv->tiling_mode != I915_TILING_NONE)
2096 i915_gem_object_do_bit_17_swizzle(obj);
2098 return 0;
2101 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2103 struct drm_gem_object *obj = reg->obj;
2104 struct drm_device *dev = obj->dev;
2105 drm_i915_private_t *dev_priv = dev->dev_private;
2106 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2107 int regnum = obj_priv->fence_reg;
2108 uint64_t val;
2110 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2111 0xfffff000) << 32;
2112 val |= obj_priv->gtt_offset & 0xfffff000;
2113 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2114 if (obj_priv->tiling_mode == I915_TILING_Y)
2115 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2116 val |= I965_FENCE_REG_VALID;
2118 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
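/*
 * Illustrative example (not from the original source): for a 1 MiB
 * X-tiled object bound at GTT offset 0x00100000 with a 4096-byte stride,
 * the value written above carries the last fenced page,
 * (0x00100000 + 0x00100000 - 4096) & 0xfffff000 = 0x001ff000, in the
 * upper 32 bits, the start address 0x00100000 in the lower bits, a pitch
 * field of 4096 / 128 - 1 = 31, no Y-tiling bit, and
 * I965_FENCE_REG_VALID set.
 */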
2121 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2123 struct drm_gem_object *obj = reg->obj;
2124 struct drm_device *dev = obj->dev;
2125 drm_i915_private_t *dev_priv = dev->dev_private;
2126 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2127 int regnum = obj_priv->fence_reg;
2128 int tile_width;
2129 uint32_t fence_reg, val;
2130 uint32_t pitch_val;
2132 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2133 (obj_priv->gtt_offset & (obj->size - 1))) {
2134 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
2135 __func__, obj_priv->gtt_offset, obj->size);
2136 return;
2139 if (obj_priv->tiling_mode == I915_TILING_Y &&
2140 HAS_128_BYTE_Y_TILING(dev))
2141 tile_width = 128;
2142 else
2143 tile_width = 512;
2145 /* Note: the pitch must be a power-of-two number of tile widths */
2146 pitch_val = obj_priv->stride / tile_width;
2147 pitch_val = ffs(pitch_val) - 1;
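/*
 * For example, a 2048-byte stride with 512-byte-wide X tiles gives
 * pitch_val = 2048 / 512 = 4, and ffs(4) - 1 = 2, i.e. log2 of the
 * pitch expressed in tile widths.
 */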
2149 val = obj_priv->gtt_offset;
2150 if (obj_priv->tiling_mode == I915_TILING_Y)
2151 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2152 val |= I915_FENCE_SIZE_BITS(obj->size);
2153 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2154 val |= I830_FENCE_REG_VALID;
2156 if (regnum < 8)
2157 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2158 else
2159 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2160 I915_WRITE(fence_reg, val);
2163 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2165 struct drm_gem_object *obj = reg->obj;
2166 struct drm_device *dev = obj->dev;
2167 drm_i915_private_t *dev_priv = dev->dev_private;
2168 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2169 int regnum = obj_priv->fence_reg;
2170 uint32_t val;
2171 uint32_t pitch_val;
2172 uint32_t fence_size_bits;
2174 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2175 (obj_priv->gtt_offset & (obj->size - 1))) {
2176 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2177 __func__, obj_priv->gtt_offset);
2178 return;
2181 pitch_val = obj_priv->stride / 128;
2182 pitch_val = ffs(pitch_val) - 1;
2183 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2185 val = obj_priv->gtt_offset;
2186 if (obj_priv->tiling_mode == I915_TILING_Y)
2187 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2188 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2189 WARN_ON(fence_size_bits & ~0x00000f00);
2190 val |= fence_size_bits;
2191 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2192 val |= I830_FENCE_REG_VALID;
2194 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2198 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2199 * @obj: object to map through a fence reg
2201 * When mapping objects through the GTT, userspace wants to be able to write
2202 * to them without having to worry about swizzling if the object is tiled.
2204 * This function walks the fence regs looking for a free one for @obj,
2205 * stealing one if it can't find any.
2207 * It then sets up the reg based on the object's properties: address, pitch
2208 * and tiling format.
2211 i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2213 struct drm_device *dev = obj->dev;
2214 struct drm_i915_private *dev_priv = dev->dev_private;
2215 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2216 struct drm_i915_fence_reg *reg = NULL;
2217 struct drm_i915_gem_object *old_obj_priv = NULL;
2218 int i, ret, avail;
2220 /* Just update our place in the LRU if our fence is getting used. */
2221 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2222 list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2223 return 0;
2226 switch (obj_priv->tiling_mode) {
2227 case I915_TILING_NONE:
2228 WARN(1, "allocating a fence for non-tiled object?\n");
2229 break;
2230 case I915_TILING_X:
2231 if (!obj_priv->stride)
2232 return -EINVAL;
2233 WARN((obj_priv->stride & (512 - 1)),
2234 "object 0x%08x is X tiled but has non-512B pitch\n",
2235 obj_priv->gtt_offset);
2236 break;
2237 case I915_TILING_Y:
2238 if (!obj_priv->stride)
2239 return -EINVAL;
2240 WARN((obj_priv->stride & (128 - 1)),
2241 "object 0x%08x is Y tiled but has non-128B pitch\n",
2242 obj_priv->gtt_offset);
2243 break;
2246 /* First try to find a free reg */
2247 avail = 0;
2248 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2249 reg = &dev_priv->fence_regs[i];
2250 if (!reg->obj)
2251 break;
2253 old_obj_priv = reg->obj->driver_private;
2254 if (!old_obj_priv->pin_count)
2255 avail++;
2258 /* None available, try to steal one or wait for a user to finish */
2259 if (i == dev_priv->num_fence_regs) {
2260 struct drm_gem_object *old_obj = NULL;
2262 if (avail == 0)
2263 return -ENOSPC;
2265 list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
2266 fence_list) {
2267 old_obj = old_obj_priv->obj;
2269 if (old_obj_priv->pin_count)
2270 continue;
2272 /* Take a reference, as otherwise the wait_rendering
2273 * below may cause the object to get freed out from
2274 * under us.
2276 drm_gem_object_reference(old_obj);
2278 /* i915 uses fences for GPU access to tiled buffers */
2279 if (IS_I965G(dev) || !old_obj_priv->active)
2280 break;
2282 /* This brings the object to the head of the LRU if it
2283 * had been written to. The only way this should
2284 * result in us waiting longer than the expected
2285 * optimal amount of time is if there was a
2286 * fence-using buffer later that was read-only.
2288 i915_gem_object_flush_gpu_write_domain(old_obj);
2289 ret = i915_gem_object_wait_rendering(old_obj);
2290 if (ret != 0) {
2291 drm_gem_object_unreference(old_obj);
2292 return ret;
2295 break;
2299 * Zap this virtual mapping so we can set up a fence again
2300 * for this object next time we need it.
2302 i915_gem_release_mmap(old_obj);
2304 i = old_obj_priv->fence_reg;
2305 reg = &dev_priv->fence_regs[i];
2307 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2308 list_del_init(&old_obj_priv->fence_list);
2310 drm_gem_object_unreference(old_obj);
2313 obj_priv->fence_reg = i;
2314 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2316 reg->obj = obj;
2318 if (IS_I965G(dev))
2319 i965_write_fence_reg(reg);
2320 else if (IS_I9XX(dev))
2321 i915_write_fence_reg(reg);
2322 else
2323 i830_write_fence_reg(reg);
2325 return 0;
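/*
 * Usage note: pre-965 chips can only access tiled buffers through a
 * fence, so i915_gem_object_pin() calls this for any tiled object it
 * binds on those chips (see the !IS_I965G() check there).
 */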
2329 * i915_gem_clear_fence_reg - clear out fence register info
2330 * @obj: object to clear
2332 * Zeroes out the fence register itself and clears out the associated
2333 * data structures in dev_priv and obj_priv.
2335 static void
2336 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2338 struct drm_device *dev = obj->dev;
2339 drm_i915_private_t *dev_priv = dev->dev_private;
2340 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2342 if (IS_I965G(dev))
2343 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2344 else {
2345 uint32_t fence_reg;
2347 if (obj_priv->fence_reg < 8)
2348 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2349 else
2350 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2351 8) * 4;
2353 I915_WRITE(fence_reg, 0);
2356 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2357 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2358 list_del_init(&obj_priv->fence_list);
2362 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2363 * to the buffer to finish, and then resets the fence register.
2364 * @obj: tiled object holding a fence register.
2366 * Zeroes out the fence register itself and clears out the associated
2367 * data structures in dev_priv and obj_priv.
2370 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2372 struct drm_device *dev = obj->dev;
2373 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2375 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2376 return 0;
2378 /* On the i915, GPU access to tiled buffers is via a fence,
2379 * therefore we must wait for any outstanding access to complete
2380 * before clearing the fence.
2382 if (!IS_I965G(dev)) {
2383 int ret;
2385 i915_gem_object_flush_gpu_write_domain(obj);
2386 i915_gem_object_flush_gtt_write_domain(obj);
2387 ret = i915_gem_object_wait_rendering(obj);
2388 if (ret != 0)
2389 return ret;
2392 i915_gem_clear_fence_reg(obj);
2394 return 0;
2398 * Finds free space in the GTT aperture and binds the object there.
2400 static int
2401 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2403 struct drm_device *dev = obj->dev;
2404 drm_i915_private_t *dev_priv = dev->dev_private;
2405 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2406 struct drm_mm_node *free_space;
2407 int page_count, ret;
2409 if (dev_priv->mm.suspended)
2410 return -EBUSY;
2411 if (alignment == 0)
2412 alignment = i915_gem_get_gtt_alignment(obj);
2413 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
2414 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2415 return -EINVAL;
2418 search_free:
2419 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2420 obj->size, alignment, 0);
2421 if (free_space != NULL) {
2422 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2423 alignment);
2424 if (obj_priv->gtt_space != NULL) {
2425 obj_priv->gtt_space->private = obj;
2426 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2429 if (obj_priv->gtt_space == NULL) {
2430 bool lists_empty;
2432 /* If the gtt is empty and we're still having trouble
2433 * fitting our object in, we're out of memory.
2435 #if WATCH_LRU
2436 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2437 #endif
2438 spin_lock(&dev_priv->mm.active_list_lock);
2439 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2440 list_empty(&dev_priv->mm.flushing_list) &&
2441 list_empty(&dev_priv->mm.active_list));
2442 spin_unlock(&dev_priv->mm.active_list_lock);
2443 if (lists_empty) {
2444 DRM_ERROR("GTT full, but LRU list empty\n");
2445 return -ENOSPC;
2448 ret = i915_gem_evict_something(dev);
2449 if (ret != 0) {
2450 if (ret != -ERESTARTSYS)
2451 DRM_ERROR("Failed to evict a buffer %d\n", ret);
2452 return ret;
2454 goto search_free;
2457 #if WATCH_BUF
2458 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2459 obj->size, obj_priv->gtt_offset);
2460 #endif
2461 ret = i915_gem_object_get_pages(obj);
2462 if (ret) {
2463 drm_mm_put_block(obj_priv->gtt_space);
2464 obj_priv->gtt_space = NULL;
2465 return ret;
2468 page_count = obj->size / PAGE_SIZE;
2469 /* Create an AGP memory structure pointing at our pages, and bind it
2470 * into the GTT.
2472 obj_priv->agp_mem = drm_agp_bind_pages(dev,
2473 obj_priv->pages,
2474 page_count,
2475 obj_priv->gtt_offset,
2476 obj_priv->agp_type);
2477 if (obj_priv->agp_mem == NULL) {
2478 i915_gem_object_put_pages(obj);
2479 drm_mm_put_block(obj_priv->gtt_space);
2480 obj_priv->gtt_space = NULL;
2481 return -ENOMEM;
2483 atomic_inc(&dev->gtt_count);
2484 atomic_add(obj->size, &dev->gtt_memory);
2486 /* Assert that the object is not currently in any GPU domain. As it
2487 * wasn't in the GTT, there shouldn't be any way it could have been in
2488 * a GPU cache
2490 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2491 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2493 return 0;
2496 void
2497 i915_gem_clflush_object(struct drm_gem_object *obj)
2499 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2501 /* If we don't have a page list set up, then we're not pinned
2502 * to GPU, and we can ignore the cache flush because it'll happen
2503 * again at bind time.
2505 if (obj_priv->pages == NULL)
2506 return;
2508 /* XXX: The 865 in particular appears to be weird in how it handles
2509 * cache flushing. We haven't figured it out, but the
2510 * clflush+agp_chipset_flush doesn't appear to successfully get the
2511 * data visible to the GPU, while wbinvd + agp_chipset_flush does.
2513 if (IS_I865G(obj->dev)) {
2514 wbinvd();
2515 return;
2518 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2521 /** Flushes any GPU write domain for the object if it's dirty. */
2522 static void
2523 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2525 struct drm_device *dev = obj->dev;
2526 uint32_t seqno;
2528 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2529 return;
2531 /* Queue the GPU write cache flushing we need. */
2532 i915_gem_flush(dev, 0, obj->write_domain);
2533 seqno = i915_add_request(dev, NULL, obj->write_domain);
2534 obj->write_domain = 0;
2535 i915_gem_object_move_to_active(obj, seqno);
2538 /** Flushes the GTT write domain for the object if it's dirty. */
2539 static void
2540 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2542 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2543 return;
2545 /* No actual flushing is required for the GTT write domain. Writes
2546 * to it immediately go to main memory as far as we know, so there's
2547 * no chipset flush. It also doesn't land in render cache.
2549 obj->write_domain = 0;
2552 /** Flushes the CPU write domain for the object if it's dirty. */
2553 static void
2554 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2556 struct drm_device *dev = obj->dev;
2558 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2559 return;
2561 i915_gem_clflush_object(obj);
2562 drm_agp_chipset_flush(dev);
2563 obj->write_domain = 0;
2567 * Moves a single object to the GTT read, and possibly write domain.
2569 * This function returns when the move is complete, including waiting on
2570 * flushes to occur.
2573 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2575 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2576 int ret;
2578 /* Not valid to be called on unbound objects. */
2579 if (obj_priv->gtt_space == NULL)
2580 return -EINVAL;
2582 i915_gem_object_flush_gpu_write_domain(obj);
2583 /* Wait on any GPU rendering and flushing to occur. */
2584 ret = i915_gem_object_wait_rendering(obj);
2585 if (ret != 0)
2586 return ret;
2588 /* If we're writing through the GTT domain, then CPU and GPU caches
2589 * will need to be invalidated at next use.
2591 if (write)
2592 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2594 i915_gem_object_flush_cpu_write_domain(obj);
2596 /* It should now be out of any other write domains, and we can update
2597 * the domain values for our changes.
2599 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2600 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2601 if (write) {
2602 obj->write_domain = I915_GEM_DOMAIN_GTT;
2603 obj_priv->dirty = 1;
2606 return 0;
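/*
 * For example, an object that was last written by the CPU ((CPU, CPU))
 * and is moved here with write = 1 has its dirty cachelines clflushed
 * by the CPU write-domain flush above and ends up as (GTT, GTT) with
 * obj_priv->dirty set.
 */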
2610 * Moves a single object to the CPU read, and possibly write domain.
2612 * This function returns when the move is complete, including waiting on
2613 * flushes to occur.
2615 static int
2616 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2618 int ret;
2620 i915_gem_object_flush_gpu_write_domain(obj);
2621 /* Wait on any GPU rendering and flushing to occur. */
2622 ret = i915_gem_object_wait_rendering(obj);
2623 if (ret != 0)
2624 return ret;
2626 i915_gem_object_flush_gtt_write_domain(obj);
2628 /* If we have a partially-valid cache of the object in the CPU,
2629 * finish invalidating it and free the per-page flags.
2631 i915_gem_object_set_to_full_cpu_read_domain(obj);
2633 /* Flush the CPU cache if it's still invalid. */
2634 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2635 i915_gem_clflush_object(obj);
2637 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2640 /* It should now be out of any other write domains, and we can update
2641 * the domain values for our changes.
2643 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2645 /* If we're writing through the CPU, then the GPU read domains will
2646 * need to be invalidated at next use.
2648 if (write) {
2649 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2650 obj->write_domain = I915_GEM_DOMAIN_CPU;
2653 return 0;
2657 * Set the next domain for the specified object. This
2658 * may not actually perform the necessary flushing/invalidating though,
2659 * as that may want to be batched with other set_domain operations
2661 * This is (we hope) the only really tricky part of gem. The goal
2662 * is fairly simple -- track which caches hold bits of the object
2663 * and make sure they remain coherent. A few concrete examples may
2664 * help to explain how it works. For shorthand, we use the notation
2665 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2666 * a pair of read and write domain masks.
2668 * Case 1: the batch buffer
2670 * 1. Allocated
2671 * 2. Written by CPU
2672 * 3. Mapped to GTT
2673 * 4. Read by GPU
2674 * 5. Unmapped from GTT
2675 * 6. Freed
2677 * Let's take these a step at a time
2679 * 1. Allocated
2680 * Pages allocated from the kernel may still have
2681 * cache contents, so we set them to (CPU, CPU) always.
2682 * 2. Written by CPU (using pwrite)
2683 * The pwrite function calls set_domain (CPU, CPU) and
2684 * this function does nothing (as nothing changes)
2685 * 3. Mapped to GTT
2686 * This function asserts that the object is not
2687 * currently in any GPU-based read or write domains
2688 * 4. Read by GPU
2689 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2690 * As write_domain is zero, this function adds in the
2691 * current read domains (CPU+COMMAND, 0).
2692 * flush_domains is set to CPU.
2693 * invalidate_domains is set to COMMAND
2694 * clflush is run to get data out of the CPU caches
2695 * then i915_dev_set_domain calls i915_gem_flush to
2696 * emit an MI_FLUSH and drm_agp_chipset_flush
2697 * 5. Unmapped from GTT
2698 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2699 * flush_domains and invalidate_domains end up both zero
2700 * so no flushing/invalidating happens
2701 * 6. Freed
2702 * yay, done
2704 * Case 2: The shared render buffer
2706 * 1. Allocated
2707 * 2. Mapped to GTT
2708 * 3. Read/written by GPU
2709 * 4. set_domain to (CPU,CPU)
2710 * 5. Read/written by CPU
2711 * 6. Read/written by GPU
2713 * 1. Allocated
2714 * Same as last example, (CPU, CPU)
2715 * 2. Mapped to GTT
2716 * Nothing changes (assertions find that it is not in the GPU)
2717 * 3. Read/written by GPU
2718 * execbuffer calls set_domain (RENDER, RENDER)
2719 * flush_domains gets CPU
2720 * invalidate_domains gets GPU
2721 * clflush (obj)
2722 * MI_FLUSH and drm_agp_chipset_flush
2723 * 4. set_domain (CPU, CPU)
2724 * flush_domains gets GPU
2725 * invalidate_domains gets CPU
2726 * wait_rendering (obj) to make sure all drawing is complete.
2727 * This will include an MI_FLUSH to get the data from GPU
2728 * to memory
2729 * clflush (obj) to invalidate the CPU cache
2730 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2731 * 5. Read/written by CPU
2732 * cache lines are loaded and dirtied
2733 * 6. Read/written by GPU
2734 * Same as last GPU access
2736 * Case 3: The constant buffer
2738 * 1. Allocated
2739 * 2. Written by CPU
2740 * 3. Read by GPU
2741 * 4. Updated (written) by CPU again
2742 * 5. Read by GPU
2744 * 1. Allocated
2745 * (CPU, CPU)
2746 * 2. Written by CPU
2747 * (CPU, CPU)
2748 * 3. Read by GPU
2749 * (CPU+RENDER, 0)
2750 * flush_domains = CPU
2751 * invalidate_domains = RENDER
2752 * clflush (obj)
2753 * MI_FLUSH
2754 * drm_agp_chipset_flush
2755 * 4. Updated (written) by CPU again
2756 * (CPU, CPU)
2757 * flush_domains = 0 (no previous write domain)
2758 * invalidate_domains = 0 (no new read domains)
2759 * 5. Read by GPU
2760 * (CPU+RENDER, 0)
2761 * flush_domains = CPU
2762 * invalidate_domains = RENDER
2763 * clflush (obj)
2764 * MI_FLUSH
2765 * drm_agp_chipset_flush
2767 static void
2768 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2770 struct drm_device *dev = obj->dev;
2771 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2772 uint32_t invalidate_domains = 0;
2773 uint32_t flush_domains = 0;
2775 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2776 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
2778 #if WATCH_BUF
2779 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2780 __func__, obj,
2781 obj->read_domains, obj->pending_read_domains,
2782 obj->write_domain, obj->pending_write_domain);
2783 #endif
2785 * If the object isn't moving to a new write domain,
2786 * let the object stay in multiple read domains
2788 if (obj->pending_write_domain == 0)
2789 obj->pending_read_domains |= obj->read_domains;
2790 else
2791 obj_priv->dirty = 1;
2794 * Flush the current write domain if
2795 * the new read domains don't match. Invalidate
2796 * any read domains which differ from the old
2797 * write domain
2799 if (obj->write_domain &&
2800 obj->write_domain != obj->pending_read_domains) {
2801 flush_domains |= obj->write_domain;
2802 invalidate_domains |=
2803 obj->pending_read_domains & ~obj->write_domain;
2806 * Invalidate any read caches which may have
2807 * stale data. That is, any new read domains.
2809 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
2810 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2811 #if WATCH_BUF
2812 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2813 __func__, flush_domains, invalidate_domains);
2814 #endif
2815 i915_gem_clflush_object(obj);
2818 /* The actual obj->write_domain will be updated with
2819 * pending_write_domain after we emit the accumulated flush for all
2820 * of our domain changes in execbuffers (which clears objects'
2821 * write_domains). So if we have a current write domain that we
2822 * aren't changing, set pending_write_domain to that.
2824 if (flush_domains == 0 && obj->pending_write_domain == 0)
2825 obj->pending_write_domain = obj->write_domain;
2826 obj->read_domains = obj->pending_read_domains;
2828 dev->invalidate_domains |= invalidate_domains;
2829 dev->flush_domains |= flush_domains;
2830 #if WATCH_BUF
2831 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2832 __func__,
2833 obj->read_domains, obj->write_domain,
2834 dev->invalidate_domains, dev->flush_domains);
2835 #endif
2839 * Moves the object from a partially CPU read to a full one.
2841 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2842 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2844 static void
2845 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2847 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2849 if (!obj_priv->page_cpu_valid)
2850 return;
2852 /* If we're partially in the CPU read domain, finish moving it in.
2854 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2855 int i;
2857 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2858 if (obj_priv->page_cpu_valid[i])
2859 continue;
2860 drm_clflush_pages(obj_priv->pages + i, 1);
2864 /* Free the page_cpu_valid mappings which are now stale, whether
2865 * or not we've got I915_GEM_DOMAIN_CPU.
2867 kfree(obj_priv->page_cpu_valid);
2868 obj_priv->page_cpu_valid = NULL;
2872 * Set the CPU read domain on a range of the object.
2874 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2875 * not entirely valid. The page_cpu_valid member of the object flags which
2876 * pages have been flushed, and will be respected by
2877 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2878 * of the whole object.
2880 * This function returns when the move is complete, including waiting on
2881 * flushes to occur.
2883 static int
2884 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2885 uint64_t offset, uint64_t size)
2887 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2888 int i, ret;
2890 if (offset == 0 && size == obj->size)
2891 return i915_gem_object_set_to_cpu_domain(obj, 0);
2893 i915_gem_object_flush_gpu_write_domain(obj);
2894 /* Wait on any GPU rendering and flushing to occur. */
2895 ret = i915_gem_object_wait_rendering(obj);
2896 if (ret != 0)
2897 return ret;
2898 i915_gem_object_flush_gtt_write_domain(obj);
2900 /* If we're already fully in the CPU read domain, we're done. */
2901 if (obj_priv->page_cpu_valid == NULL &&
2902 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
2903 return 0;
2905 /* Otherwise, create/clear the per-page CPU read domain flag if we're
2906 * newly adding I915_GEM_DOMAIN_CPU
2908 if (obj_priv->page_cpu_valid == NULL) {
2909 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
2910 GFP_KERNEL);
2911 if (obj_priv->page_cpu_valid == NULL)
2912 return -ENOMEM;
2913 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2914 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
2916 /* Flush the cache on any pages that are still invalid from the CPU's
2917 * perspective.
2919 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2920 i++) {
2921 if (obj_priv->page_cpu_valid[i])
2922 continue;
2924 drm_clflush_pages(obj_priv->pages + i, 1);
2926 obj_priv->page_cpu_valid[i] = 1;
2929 /* It should now be out of any other write domains, and we can update
2930 * the domain values for our changes.
2932 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2934 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2936 return 0;
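/*
 * For example, a call with offset 0 and size 4096 on an 8 KiB object
 * that currently has no CPU read domain allocates the two-entry
 * page_cpu_valid array, clflushes page 0 only, marks
 * page_cpu_valid[0] = 1 and adds I915_GEM_DOMAIN_CPU to read_domains;
 * page 1 stays unflushed until a later range includes it.
 */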
2940 * Pin an object to the GTT and evaluate the relocations landing in it.
2942 static int
2943 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2944 struct drm_file *file_priv,
2945 struct drm_i915_gem_exec_object *entry,
2946 struct drm_i915_gem_relocation_entry *relocs)
2948 struct drm_device *dev = obj->dev;
2949 drm_i915_private_t *dev_priv = dev->dev_private;
2950 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2951 int i, ret;
2952 void __iomem *reloc_page;
2954 /* Choose the GTT offset for our buffer and put it there. */
2955 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2956 if (ret)
2957 return ret;
2959 entry->offset = obj_priv->gtt_offset;
2961 /* Apply the relocations, using the GTT aperture to avoid cache
2962 * flushing requirements.
2964 for (i = 0; i < entry->relocation_count; i++) {
2965 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
2966 struct drm_gem_object *target_obj;
2967 struct drm_i915_gem_object *target_obj_priv;
2968 uint32_t reloc_val, reloc_offset;
2969 uint32_t __iomem *reloc_entry;
2971 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
2972 reloc->target_handle);
2973 if (target_obj == NULL) {
2974 i915_gem_object_unpin(obj);
2975 return -EBADF;
2977 target_obj_priv = target_obj->driver_private;
2979 /* The target buffer should have appeared before us in the
2980 * exec_object list, so it should have a GTT space bound by now.
2982 if (target_obj_priv->gtt_space == NULL) {
2983 DRM_ERROR("No GTT space found for object %d\n",
2984 reloc->target_handle);
2985 drm_gem_object_unreference(target_obj);
2986 i915_gem_object_unpin(obj);
2987 return -EINVAL;
2990 if (reloc->offset > obj->size - 4) {
2991 DRM_ERROR("Relocation beyond object bounds: "
2992 "obj %p target %d offset %d size %d.\n",
2993 obj, reloc->target_handle,
2994 (int) reloc->offset, (int) obj->size);
2995 drm_gem_object_unreference(target_obj);
2996 i915_gem_object_unpin(obj);
2997 return -EINVAL;
2999 if (reloc->offset & 3) {
3000 DRM_ERROR("Relocation not 4-byte aligned: "
3001 "obj %p target %d offset %d.\n",
3002 obj, reloc->target_handle,
3003 (int) reloc->offset);
3004 drm_gem_object_unreference(target_obj);
3005 i915_gem_object_unpin(obj);
3006 return -EINVAL;
3009 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3010 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3011 DRM_ERROR("reloc with read/write CPU domains: "
3012 "obj %p target %d offset %d "
3013 "read %08x write %08x",
3014 obj, reloc->target_handle,
3015 (int) reloc->offset,
3016 reloc->read_domains,
3017 reloc->write_domain);
3018 drm_gem_object_unreference(target_obj);
3019 i915_gem_object_unpin(obj);
3020 return -EINVAL;
3023 if (reloc->write_domain && target_obj->pending_write_domain &&
3024 reloc->write_domain != target_obj->pending_write_domain) {
3025 DRM_ERROR("Write domain conflict: "
3026 "obj %p target %d offset %d "
3027 "new %08x old %08x\n",
3028 obj, reloc->target_handle,
3029 (int) reloc->offset,
3030 reloc->write_domain,
3031 target_obj->pending_write_domain);
3032 drm_gem_object_unreference(target_obj);
3033 i915_gem_object_unpin(obj);
3034 return -EINVAL;
3037 #if WATCH_RELOC
3038 DRM_INFO("%s: obj %p offset %08x target %d "
3039 "read %08x write %08x gtt %08x "
3040 "presumed %08x delta %08x\n",
3041 __func__,
3042 obj,
3043 (int) reloc->offset,
3044 (int) reloc->target_handle,
3045 (int) reloc->read_domains,
3046 (int) reloc->write_domain,
3047 (int) target_obj_priv->gtt_offset,
3048 (int) reloc->presumed_offset,
3049 reloc->delta);
3050 #endif
3052 target_obj->pending_read_domains |= reloc->read_domains;
3053 target_obj->pending_write_domain |= reloc->write_domain;
3055 /* If the relocation already has the right value in it, no
3056 * more work needs to be done.
3058 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3059 drm_gem_object_unreference(target_obj);
3060 continue;
3063 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3064 if (ret != 0) {
3065 drm_gem_object_unreference(target_obj);
3066 i915_gem_object_unpin(obj);
3067 return -EINVAL;
3070 /* Map the page containing the relocation we're going to
3071 * perform.
3073 reloc_offset = obj_priv->gtt_offset + reloc->offset;
3074 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3075 (reloc_offset &
3076 ~(PAGE_SIZE - 1)));
3077 reloc_entry = (uint32_t __iomem *)(reloc_page +
3078 (reloc_offset & (PAGE_SIZE - 1)));
3079 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
3081 #if WATCH_BUF
3082 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
3083 obj, (unsigned int) reloc->offset,
3084 readl(reloc_entry), reloc_val);
3085 #endif
3086 writel(reloc_val, reloc_entry);
3087 io_mapping_unmap_atomic(reloc_page);
3089 /* The updated presumed offset for this entry will be
3090 * copied back out to the user.
3092 reloc->presumed_offset = target_obj_priv->gtt_offset;
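/*
 * Illustrative values: if the target buffer ended up bound at
 * 0x00200000 and reloc->delta is 0x80, the dword at this object's
 * reloc->offset has just been rewritten through the GTT mapping to
 * 0x00200080, and presumed_offset is set to 0x00200000 so userspace
 * can skip this relocation next time the offsets still match.
 */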
3094 drm_gem_object_unreference(target_obj);
3097 #if WATCH_BUF
3098 if (0)
3099 i915_gem_dump_object(obj, 128, __func__, ~0);
3100 #endif
3101 return 0;
3104 /** Dispatch a batchbuffer to the ring
3106 static int
3107 i915_dispatch_gem_execbuffer(struct drm_device *dev,
3108 struct drm_i915_gem_execbuffer *exec,
3109 struct drm_clip_rect *cliprects,
3110 uint64_t exec_offset)
3112 drm_i915_private_t *dev_priv = dev->dev_private;
3113 int nbox = exec->num_cliprects;
3114 int i = 0, count;
3115 uint32_t exec_start, exec_len;
3116 RING_LOCALS;
3118 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3119 exec_len = (uint32_t) exec->batch_len;
3121 count = nbox ? nbox : 1;
3123 for (i = 0; i < count; i++) {
3124 if (i < nbox) {
3125 int ret = i915_emit_box(dev, cliprects, i,
3126 exec->DR1, exec->DR4);
3127 if (ret)
3128 return ret;
3131 if (IS_I830(dev) || IS_845G(dev)) {
3132 BEGIN_LP_RING(4);
3133 OUT_RING(MI_BATCH_BUFFER);
3134 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3135 OUT_RING(exec_start + exec_len - 4);
3136 OUT_RING(0);
3137 ADVANCE_LP_RING();
3138 } else {
3139 BEGIN_LP_RING(2);
3140 if (IS_I965G(dev)) {
3141 OUT_RING(MI_BATCH_BUFFER_START |
3142 (2 << 6) |
3143 MI_BATCH_NON_SECURE_I965);
3144 OUT_RING(exec_start);
3145 } else {
3146 OUT_RING(MI_BATCH_BUFFER_START |
3147 (2 << 6));
3148 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3150 ADVANCE_LP_RING();
3154 /* XXX breadcrumb */
3155 return 0;
3158 /* Throttle our rendering by waiting until the ring has completed our requests
3159 * emitted over 20 msec ago.
3161 * Note that if we were to use the current jiffies each time around the loop,
3162 * we wouldn't escape the function with any frames outstanding if the time to
3163 * render a frame was over 20ms.
3165 * This should get us reasonable parallelism between CPU and GPU but also
3166 * relatively low latency when blocking on a particular request to finish.
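/*
 * For instance (illustrative): a client with requests emitted 5 ms,
 * 15 ms and 30 ms ago only waits here on the 30 ms-old request; the
 * two newer ones stay outstanding, which keeps the GPU pipelined while
 * bounding how far ahead any one client can run.
 */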
3168 static int
3169 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3171 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3172 int ret = 0;
3173 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3175 mutex_lock(&dev->struct_mutex);
3176 while (!list_empty(&i915_file_priv->mm.request_list)) {
3177 struct drm_i915_gem_request *request;
3179 request = list_first_entry(&i915_file_priv->mm.request_list,
3180 struct drm_i915_gem_request,
3181 client_list);
3183 if (time_after_eq(request->emitted_jiffies, recent_enough))
3184 break;
3186 ret = i915_wait_request(dev, request->seqno);
3187 if (ret != 0)
3188 break;
3190 mutex_unlock(&dev->struct_mutex);
3192 return ret;
3195 static int
3196 i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3197 uint32_t buffer_count,
3198 struct drm_i915_gem_relocation_entry **relocs)
3200 uint32_t reloc_count = 0, reloc_index = 0, i;
3201 int ret;
3203 *relocs = NULL;
3204 for (i = 0; i < buffer_count; i++) {
3205 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3206 return -EINVAL;
3207 reloc_count += exec_list[i].relocation_count;
3210 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3211 if (*relocs == NULL)
3212 return -ENOMEM;
3214 for (i = 0; i < buffer_count; i++) {
3215 struct drm_i915_gem_relocation_entry __user *user_relocs;
3217 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3219 ret = copy_from_user(&(*relocs)[reloc_index],
3220 user_relocs,
3221 exec_list[i].relocation_count *
3222 sizeof(**relocs));
3223 if (ret != 0) {
3224 drm_free_large(*relocs);
3225 *relocs = NULL;
3226 return -EFAULT;
3229 reloc_index += exec_list[i].relocation_count;
3232 return 0;
3235 static int
3236 i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3237 uint32_t buffer_count,
3238 struct drm_i915_gem_relocation_entry *relocs)
3240 uint32_t reloc_count = 0, i;
3241 int ret = 0;
3243 for (i = 0; i < buffer_count; i++) {
3244 struct drm_i915_gem_relocation_entry __user *user_relocs;
3245 int unwritten;
3247 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3249 unwritten = copy_to_user(user_relocs,
3250 &relocs[reloc_count],
3251 exec_list[i].relocation_count *
3252 sizeof(*relocs));
3254 if (unwritten) {
3255 ret = -EFAULT;
3256 goto err;
3259 reloc_count += exec_list[i].relocation_count;
3262 err:
3263 drm_free_large(relocs);
3265 return ret;
3268 static int
3269 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
3270 uint64_t exec_offset)
3272 uint32_t exec_start, exec_len;
3274 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3275 exec_len = (uint32_t) exec->batch_len;
3277 if ((exec_start | exec_len) & 0x7)
3278 return -EINVAL;
3280 if (!exec_start)
3281 return -EINVAL;
3283 return 0;
3287 i915_gem_execbuffer(struct drm_device *dev, void *data,
3288 struct drm_file *file_priv)
3290 drm_i915_private_t *dev_priv = dev->dev_private;
3291 struct drm_i915_gem_execbuffer *args = data;
3292 struct drm_i915_gem_exec_object *exec_list = NULL;
3293 struct drm_gem_object **object_list = NULL;
3294 struct drm_gem_object *batch_obj;
3295 struct drm_i915_gem_object *obj_priv;
3296 struct drm_clip_rect *cliprects = NULL;
3297 struct drm_i915_gem_relocation_entry *relocs;
3298 int ret, ret2, i, pinned = 0;
3299 uint64_t exec_offset;
3300 uint32_t seqno, flush_domains, reloc_index;
3301 int pin_tries;
3303 #if WATCH_EXEC
3304 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3305 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3306 #endif
3308 if (args->buffer_count < 1) {
3309 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3310 return -EINVAL;
3312 /* Copy in the exec list from userland */
3313 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3314 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
3315 if (exec_list == NULL || object_list == NULL) {
3316 DRM_ERROR("Failed to allocate exec or object list "
3317 "for %d buffers\n",
3318 args->buffer_count);
3319 ret = -ENOMEM;
3320 goto pre_mutex_err;
3322 ret = copy_from_user(exec_list,
3323 (struct drm_i915_relocation_entry __user *)
3324 (uintptr_t) args->buffers_ptr,
3325 sizeof(*exec_list) * args->buffer_count);
3326 if (ret != 0) {
3327 DRM_ERROR("copy %d exec entries failed %d\n",
3328 args->buffer_count, ret);
3329 goto pre_mutex_err;
3332 if (args->num_cliprects != 0) {
3333 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3334 GFP_KERNEL);
3335 if (cliprects == NULL) {
ret = -ENOMEM;
3336 goto pre_mutex_err;
}
3338 ret = copy_from_user(cliprects,
3339 (struct drm_clip_rect __user *)
3340 (uintptr_t) args->cliprects_ptr,
3341 sizeof(*cliprects) * args->num_cliprects);
3342 if (ret != 0) {
3343 DRM_ERROR("copy %d cliprects failed: %d\n",
3344 args->num_cliprects, ret);
3345 goto pre_mutex_err;
3349 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3350 &relocs);
3351 if (ret != 0)
3352 goto pre_mutex_err;
3354 mutex_lock(&dev->struct_mutex);
3356 i915_verify_inactive(dev, __FILE__, __LINE__);
3358 if (dev_priv->mm.wedged) {
3359 DRM_ERROR("Execbuf while wedged\n");
3360 mutex_unlock(&dev->struct_mutex);
3361 ret = -EIO;
3362 goto pre_mutex_err;
3365 if (dev_priv->mm.suspended) {
3366 DRM_ERROR("Execbuf while VT-switched.\n");
3367 mutex_unlock(&dev->struct_mutex);
3368 ret = -EBUSY;
3369 goto pre_mutex_err;
3372 /* Look up object handles */
3373 for (i = 0; i < args->buffer_count; i++) {
3374 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3375 exec_list[i].handle);
3376 if (object_list[i] == NULL) {
3377 DRM_ERROR("Invalid object handle %d at index %d\n",
3378 exec_list[i].handle, i);
3379 ret = -EBADF;
3380 goto err;
3383 obj_priv = object_list[i]->driver_private;
3384 if (obj_priv->in_execbuffer) {
3385 DRM_ERROR("Object %p appears more than once in object list\n",
3386 object_list[i]);
3387 ret = -EBADF;
3388 goto err;
3390 obj_priv->in_execbuffer = true;
3393 /* Pin and relocate */
3394 for (pin_tries = 0; ; pin_tries++) {
3395 ret = 0;
3396 reloc_index = 0;
3398 for (i = 0; i < args->buffer_count; i++) {
3399 object_list[i]->pending_read_domains = 0;
3400 object_list[i]->pending_write_domain = 0;
3401 ret = i915_gem_object_pin_and_relocate(object_list[i],
3402 file_priv,
3403 &exec_list[i],
3404 &relocs[reloc_index]);
3405 if (ret)
3406 break;
3407 pinned = i + 1;
3408 reloc_index += exec_list[i].relocation_count;
3410 /* success */
3411 if (ret == 0)
3412 break;
3414 /* error other than GTT full, or we've already tried again */
3415 if (ret != -ENOSPC || pin_tries >= 1) {
3416 if (ret != -ERESTARTSYS)
3417 DRM_ERROR("Failed to pin buffers %d\n", ret);
3418 goto err;
3421 /* unpin all of our buffers */
3422 for (i = 0; i < pinned; i++)
3423 i915_gem_object_unpin(object_list[i]);
3424 pinned = 0;
3426 /* evict everyone we can from the aperture */
3427 ret = i915_gem_evict_everything(dev);
3428 if (ret)
3429 goto err;
3432 /* Set the pending read domains for the batch buffer to COMMAND */
3433 batch_obj = object_list[args->buffer_count-1];
3434 if (batch_obj->pending_write_domain) {
3435 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3436 ret = -EINVAL;
3437 goto err;
3439 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3441 /* Sanity check the batch buffer, prior to moving objects */
3442 exec_offset = exec_list[args->buffer_count - 1].offset;
3443 ret = i915_gem_check_execbuffer(args, exec_offset);
3444 if (ret != 0) {
3445 DRM_ERROR("execbuf with invalid offset/length\n");
3446 goto err;
3449 i915_verify_inactive(dev, __FILE__, __LINE__);
3451 /* Zero the global flush/invalidate flags. These
3452 * will be modified as new domains are computed
3453 * for each object
3455 dev->invalidate_domains = 0;
3456 dev->flush_domains = 0;
3458 for (i = 0; i < args->buffer_count; i++) {
3459 struct drm_gem_object *obj = object_list[i];
3461 /* Compute new gpu domains and update invalidate/flush */
3462 i915_gem_object_set_to_gpu_domain(obj);
3465 i915_verify_inactive(dev, __FILE__, __LINE__);
3467 if (dev->invalidate_domains | dev->flush_domains) {
3468 #if WATCH_EXEC
3469 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3470 __func__,
3471 dev->invalidate_domains,
3472 dev->flush_domains);
3473 #endif
3474 i915_gem_flush(dev,
3475 dev->invalidate_domains,
3476 dev->flush_domains);
3477 if (dev->flush_domains)
3478 (void)i915_add_request(dev, file_priv,
3479 dev->flush_domains);
3482 for (i = 0; i < args->buffer_count; i++) {
3483 struct drm_gem_object *obj = object_list[i];
3485 obj->write_domain = obj->pending_write_domain;
3488 i915_verify_inactive(dev, __FILE__, __LINE__);
3490 #if WATCH_COHERENCY
3491 for (i = 0; i < args->buffer_count; i++) {
3492 i915_gem_object_check_coherency(object_list[i],
3493 exec_list[i].handle);
3495 #endif
3497 #if WATCH_EXEC
3498 i915_gem_dump_object(batch_obj,
3499 args->batch_len,
3500 __func__,
3501 ~0);
3502 #endif
3504 /* Exec the batchbuffer */
3505 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
3506 if (ret) {
3507 DRM_ERROR("dispatch failed %d\n", ret);
3508 goto err;
3512 * Ensure that the commands in the batch buffer are
3513 * finished before the interrupt fires
3515 flush_domains = i915_retire_commands(dev);
3517 i915_verify_inactive(dev, __FILE__, __LINE__);
3520 * Get a seqno representing the execution of the current buffer,
3521 * which we can wait on. We would like to mitigate these interrupts,
3522 * likely by only creating seqnos occasionally (so that we have
3523 * *some* interrupts representing completion of buffers that we can
3524 * wait on when trying to clear up gtt space).
3526 seqno = i915_add_request(dev, file_priv, flush_domains);
3527 BUG_ON(seqno == 0);
3528 for (i = 0; i < args->buffer_count; i++) {
3529 struct drm_gem_object *obj = object_list[i];
3531 i915_gem_object_move_to_active(obj, seqno);
3532 #if WATCH_LRU
3533 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3534 #endif
3536 #if WATCH_LRU
3537 i915_dump_lru(dev, __func__);
3538 #endif
3540 i915_verify_inactive(dev, __FILE__, __LINE__);
3542 err:
3543 for (i = 0; i < pinned; i++)
3544 i915_gem_object_unpin(object_list[i]);
3546 for (i = 0; i < args->buffer_count; i++) {
3547 if (object_list[i]) {
3548 obj_priv = object_list[i]->driver_private;
3549 obj_priv->in_execbuffer = false;
3551 drm_gem_object_unreference(object_list[i]);
3554 mutex_unlock(&dev->struct_mutex);
3556 if (!ret) {
3557 /* Copy the new buffer offsets back to the user's exec list. */
3558 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3559 (uintptr_t) args->buffers_ptr,
3560 exec_list,
3561 sizeof(*exec_list) * args->buffer_count);
3562 if (ret) {
3563 ret = -EFAULT;
3564 DRM_ERROR("failed to copy %d exec entries "
3565 "back to user (%d)\n",
3566 args->buffer_count, ret);
3570 /* Copy the updated relocations out regardless of current error
3571 * state. Failure to update the relocs would mean that the next
3572 * time userland calls execbuf, it would do so with presumed offset
3573 * state that didn't match the actual object state.
3575 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3576 relocs);
3577 if (ret2 != 0) {
3578 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3580 if (ret == 0)
3581 ret = ret2;
3584 pre_mutex_err:
3585 drm_free_large(object_list);
3586 drm_free_large(exec_list);
3587 kfree(cliprects);
3589 return ret;
3593 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3595 struct drm_device *dev = obj->dev;
3596 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3597 int ret;
3599 i915_verify_inactive(dev, __FILE__, __LINE__);
3600 if (obj_priv->gtt_space == NULL) {
3601 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3602 if (ret != 0) {
3603 if (ret != -EBUSY && ret != -ERESTARTSYS)
3604 DRM_ERROR("Failure to bind: %d\n", ret);
3605 return ret;
3609 * Pre-965 chips need a fence register set up in order to
3610 * properly handle tiled surfaces.
3612 if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
3613 ret = i915_gem_object_get_fence_reg(obj);
3614 if (ret != 0) {
3615 if (ret != -EBUSY && ret != -ERESTARTSYS)
3616 DRM_ERROR("Failure to install fence: %d\n",
3617 ret);
3618 return ret;
3621 obj_priv->pin_count++;
3623 /* If the object is not active and not pending a flush,
3624 * remove it from the inactive list
3626 if (obj_priv->pin_count == 1) {
3627 atomic_inc(&dev->pin_count);
3628 atomic_add(obj->size, &dev->pin_memory);
3629 if (!obj_priv->active &&
3630 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
3631 !list_empty(&obj_priv->list))
3632 list_del_init(&obj_priv->list);
3634 i915_verify_inactive(dev, __FILE__, __LINE__);
3636 return 0;
3639 void
3640 i915_gem_object_unpin(struct drm_gem_object *obj)
3642 struct drm_device *dev = obj->dev;
3643 drm_i915_private_t *dev_priv = dev->dev_private;
3644 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3646 i915_verify_inactive(dev, __FILE__, __LINE__);
3647 obj_priv->pin_count--;
3648 BUG_ON(obj_priv->pin_count < 0);
3649 BUG_ON(obj_priv->gtt_space == NULL);
3651 /* If the object is no longer pinned, and is
3652 * neither active nor being flushed, then stick it on
3653 * the inactive list
3655 if (obj_priv->pin_count == 0) {
3656 if (!obj_priv->active &&
3657 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
3658 list_move_tail(&obj_priv->list,
3659 &dev_priv->mm.inactive_list);
3660 atomic_dec(&dev->pin_count);
3661 atomic_sub(obj->size, &dev->pin_memory);
3663 i915_verify_inactive(dev, __FILE__, __LINE__);
3667 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3668 struct drm_file *file_priv)
3670 struct drm_i915_gem_pin *args = data;
3671 struct drm_gem_object *obj;
3672 struct drm_i915_gem_object *obj_priv;
3673 int ret;
3675 mutex_lock(&dev->struct_mutex);
3677 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3678 if (obj == NULL) {
3679 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3680 args->handle);
3681 mutex_unlock(&dev->struct_mutex);
3682 return -EBADF;
3684 obj_priv = obj->driver_private;
3686 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3687 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3688 args->handle);
3689 drm_gem_object_unreference(obj);
3690 mutex_unlock(&dev->struct_mutex);
3691 return -EINVAL;
3694 obj_priv->user_pin_count++;
3695 obj_priv->pin_filp = file_priv;
3696 if (obj_priv->user_pin_count == 1) {
3697 ret = i915_gem_object_pin(obj, args->alignment);
3698 if (ret != 0) {
3699 drm_gem_object_unreference(obj);
3700 mutex_unlock(&dev->struct_mutex);
3701 return ret;
3705 /* XXX - flush the CPU caches for pinned objects
3706 * as the X server doesn't manage domains yet
3708 i915_gem_object_flush_cpu_write_domain(obj);
3709 args->offset = obj_priv->gtt_offset;
3710 drm_gem_object_unreference(obj);
3711 mutex_unlock(&dev->struct_mutex);
3713 return 0;
3717 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3718 struct drm_file *file_priv)
3720 struct drm_i915_gem_pin *args = data;
3721 struct drm_gem_object *obj;
3722 struct drm_i915_gem_object *obj_priv;
3724 mutex_lock(&dev->struct_mutex);
3726 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3727 if (obj == NULL) {
3728 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3729 args->handle);
3730 mutex_unlock(&dev->struct_mutex);
3731 return -EBADF;
3734 obj_priv = obj->driver_private;
3735 if (obj_priv->pin_filp != file_priv) {
3736 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3737 args->handle);
3738 drm_gem_object_unreference(obj);
3739 mutex_unlock(&dev->struct_mutex);
3740 return -EINVAL;
3742 obj_priv->user_pin_count--;
3743 if (obj_priv->user_pin_count == 0) {
3744 obj_priv->pin_filp = NULL;
3745 i915_gem_object_unpin(obj);
3748 drm_gem_object_unreference(obj);
3749 mutex_unlock(&dev->struct_mutex);
3750 return 0;
3754 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3755 struct drm_file *file_priv)
3757 struct drm_i915_gem_busy *args = data;
3758 struct drm_gem_object *obj;
3759 struct drm_i915_gem_object *obj_priv;
3761 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3762 if (obj == NULL) {
3763 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3764 args->handle);
3765 return -EBADF;
3768 mutex_lock(&dev->struct_mutex);
3769 /* Update the active list for the hardware's current position.
3770 * Otherwise this only updates on a delayed timer or when irqs are
3771 * actually unmasked, and our working set ends up being larger than
3772 * required.
3774 i915_gem_retire_requests(dev);
3776 obj_priv = obj->driver_private;
3777 /* Don't count being on the flushing list against the object being
3778 * done. Otherwise, a buffer left on the flushing list but not getting
3779 * flushed (because nobody's flushing that domain) won't ever return
3780 * unbusy and get reused by libdrm's bo cache. The other expected
3781 * consumer of this interface, OpenGL's occlusion queries, also specs
3782 * that the objects get unbusy "eventually" without any interference.
3784 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
3786 drm_gem_object_unreference(obj);
3787 mutex_unlock(&dev->struct_mutex);
3788 return 0;
3792 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3793 struct drm_file *file_priv)
3795 return i915_gem_ring_throttle(dev, file_priv);
3798 int i915_gem_init_object(struct drm_gem_object *obj)
3800 struct drm_i915_gem_object *obj_priv;
3802 obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
3803 if (obj_priv == NULL)
3804 return -ENOMEM;
3807 * We've just allocated pages from the kernel,
3808 * so they've just been written by the CPU with
3809 * zeros. They'll need to be clflushed before we
3810 * use them with the GPU.
3812 obj->write_domain = I915_GEM_DOMAIN_CPU;
3813 obj->read_domains = I915_GEM_DOMAIN_CPU;
3815 obj_priv->agp_type = AGP_USER_MEMORY;
3817 obj->driver_private = obj_priv;
3818 obj_priv->obj = obj;
3819 obj_priv->fence_reg = I915_FENCE_REG_NONE;
3820 INIT_LIST_HEAD(&obj_priv->list);
3821 INIT_LIST_HEAD(&obj_priv->fence_list);
3823 return 0;
3826 void i915_gem_free_object(struct drm_gem_object *obj)
3828 struct drm_device *dev = obj->dev;
3829 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3831 while (obj_priv->pin_count > 0)
3832 i915_gem_object_unpin(obj);
3834 if (obj_priv->phys_obj)
3835 i915_gem_detach_phys_object(dev, obj);
3837 i915_gem_object_unbind(obj);
3839 if (obj_priv->mmap_offset)
3840 i915_gem_free_mmap_offset(obj);
3842 kfree(obj_priv->page_cpu_valid);
3843 kfree(obj_priv->bit_17);
3844 kfree(obj->driver_private);
3847 /** Unbinds all objects that are on the given buffer list. */
3848 static int
3849 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3851 struct drm_gem_object *obj;
3852 struct drm_i915_gem_object *obj_priv;
3853 int ret;
3855 while (!list_empty(head)) {
3856 obj_priv = list_first_entry(head,
3857 struct drm_i915_gem_object,
3858 list);
3859 obj = obj_priv->obj;
3861 if (obj_priv->pin_count != 0) {
3862 DRM_ERROR("Pinned object in unbind list\n");
3863 mutex_unlock(&dev->struct_mutex);
3864 return -EINVAL;
3867 ret = i915_gem_object_unbind(obj);
3868 if (ret != 0) {
3869 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
3870 ret);
3871 mutex_unlock(&dev->struct_mutex);
3872 return ret;
3877 return 0;
3881 i915_gem_idle(struct drm_device *dev)
3883 drm_i915_private_t *dev_priv = dev->dev_private;
3884 uint32_t seqno, cur_seqno, last_seqno;
3885 int stuck, ret;
3887 mutex_lock(&dev->struct_mutex);
3889 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
3890 mutex_unlock(&dev->struct_mutex);
3891 return 0;
3894 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3895 * We need to replace this with a semaphore, or something.
3897 dev_priv->mm.suspended = 1;
3899 /* Cancel the retire work handler, wait for it to finish if running
3901 mutex_unlock(&dev->struct_mutex);
3902 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3903 mutex_lock(&dev->struct_mutex);
3905 i915_kernel_lost_context(dev);
3907 /* Flush the GPU along with all non-CPU write domains
3909 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
3910 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
3912 if (seqno == 0) {
3913 mutex_unlock(&dev->struct_mutex);
3914 return -ENOMEM;
3917 dev_priv->mm.waiting_gem_seqno = seqno;
3918 last_seqno = 0;
3919 stuck = 0;
3920 for (;;) {
3921 cur_seqno = i915_get_gem_seqno(dev);
3922 if (i915_seqno_passed(cur_seqno, seqno))
3923 break;
3924 if (last_seqno == cur_seqno) {
3925 if (stuck++ > 100) {
3926 DRM_ERROR("hardware wedged\n");
3927 dev_priv->mm.wedged = 1;
3928 DRM_WAKEUP(&dev_priv->irq_queue);
3929 break;
3932 msleep(10);
3933 last_seqno = cur_seqno;
3935 dev_priv->mm.waiting_gem_seqno = 0;
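/*
 * Note: the loop above sleeps 10 ms per iteration and gives up once the
 * seqno has failed to advance for more than 100 iterations, so the
 * hardware is declared wedged after roughly a second without progress.
 */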
3937 i915_gem_retire_requests(dev);
3939 spin_lock(&dev_priv->mm.active_list_lock);
3940 if (!dev_priv->mm.wedged) {
3941 /* Active and flushing should now be empty as we've
3942 * waited for a sequence higher than any pending execbuffer
3944 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3945 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3946 /* Request should now be empty as we've also waited
3947 * for the last request in the list
3949 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3952 /* Empty the active and flushing lists to inactive. If there's
3953 * anything left at this point, it means that we're wedged and
3954 * nothing good's going to happen by leaving them there. So strip
3955 * the GPU domains and just stuff them onto inactive.
3957 while (!list_empty(&dev_priv->mm.active_list)) {
3958 struct drm_i915_gem_object *obj_priv;
3960 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3961 struct drm_i915_gem_object,
3962 list);
3963 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3964 i915_gem_object_move_to_inactive(obj_priv->obj);
3966 spin_unlock(&dev_priv->mm.active_list_lock);
3968 while (!list_empty(&dev_priv->mm.flushing_list)) {
3969 struct drm_i915_gem_object *obj_priv;
3971 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
3972 struct drm_i915_gem_object,
3973 list);
3974 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3975 i915_gem_object_move_to_inactive(obj_priv->obj);
3976 }
3979 /* Move all inactive buffers out of the GTT. */
3980 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
3981 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
3982 if (ret) {
3983 mutex_unlock(&dev->struct_mutex);
3984 return ret;
3985 }
3987 i915_gem_cleanup_ringbuffer(dev);
3988 mutex_unlock(&dev->struct_mutex);
3990 return 0;
3991 }
3993 static int
3994 i915_gem_init_hws(struct drm_device *dev)
3995 {
3996 drm_i915_private_t *dev_priv = dev->dev_private;
3997 struct drm_gem_object *obj;
3998 struct drm_i915_gem_object *obj_priv;
3999 int ret;
4001 /* If we need a physical address for the status page, it's already
4002 * initialized at driver load time.
4003 */
4004 if (!I915_NEED_GFX_HWS(dev))
4005 return 0;
4007 obj = drm_gem_object_alloc(dev, 4096);
4008 if (obj == NULL) {
4009 DRM_ERROR("Failed to allocate status page\n");
4010 return -ENOMEM;
4011 }
4012 obj_priv = obj->driver_private;
4013 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
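/* Note: the GPU writes the status page through snooped memory, so
 * the backing page is mapped cached (AGP_USER_CACHED_MEMORY above)
 * rather than the default uncached AGP type.
 */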
4015 ret = i915_gem_object_pin(obj, 4096);
4016 if (ret != 0) {
4017 drm_gem_object_unreference(obj);
4018 return ret;
4019 }
4021 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
4023 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
4024 if (dev_priv->hw_status_page == NULL) {
4025 DRM_ERROR("Failed to map status page.\n");
4026 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4027 i915_gem_object_unpin(obj);
4028 drm_gem_object_unreference(obj);
4029 return -EINVAL;
4030 }
4031 dev_priv->hws_obj = obj;
4032 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4033 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4034 I915_READ(HWS_PGA); /* posting read */
4035 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4037 return 0;
4038 }
4040 static void
4041 i915_gem_cleanup_hws(struct drm_device *dev)
4042 {
4043 drm_i915_private_t *dev_priv = dev->dev_private;
4044 struct drm_gem_object *obj;
4045 struct drm_i915_gem_object *obj_priv;
4047 if (dev_priv->hws_obj == NULL)
4048 return;
4050 obj = dev_priv->hws_obj;
4051 obj_priv = obj->driver_private;
4053 kunmap(obj_priv->pages[0]);
4054 i915_gem_object_unpin(obj);
4055 drm_gem_object_unreference(obj);
4056 dev_priv->hws_obj = NULL;
4058 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4059 dev_priv->hw_status_page = NULL;
4061 /* Write high address into HWS_PGA when disabling. */
4062 I915_WRITE(HWS_PGA, 0x1ffff000);
4063 }
4065 int
4066 i915_gem_init_ringbuffer(struct drm_device *dev)
4067 {
4068 drm_i915_private_t *dev_priv = dev->dev_private;
4069 struct drm_gem_object *obj;
4070 struct drm_i915_gem_object *obj_priv;
4071 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
4072 int ret;
4073 u32 head;
4075 ret = i915_gem_init_hws(dev);
4076 if (ret != 0)
4077 return ret;
4079 obj = drm_gem_object_alloc(dev, 128 * 1024);
4080 if (obj == NULL) {
4081 DRM_ERROR("Failed to allocate ringbuffer\n");
4082 i915_gem_cleanup_hws(dev);
4083 return -ENOMEM;
4084 }
4085 obj_priv = obj->driver_private;
4087 ret = i915_gem_object_pin(obj, 4096);
4088 if (ret != 0) {
4089 drm_gem_object_unreference(obj);
4090 i915_gem_cleanup_hws(dev);
4091 return ret;
4092 }
4094 /* Set up the kernel mapping for the ring. */
4095 ring->Size = obj->size;
4096 ring->tail_mask = obj->size - 1;
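/* Note: the ring size (128KB here) is a power of two, so the tail
 * offset can wrap with a simple mask instead of a modulo.
 */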
4098 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
4099 ring->map.size = obj->size;
4100 ring->map.type = 0;
4101 ring->map.flags = 0;
4102 ring->map.mtrr = 0;
4104 drm_core_ioremap_wc(&ring->map, dev);
4105 if (ring->map.handle == NULL) {
4106 DRM_ERROR("Failed to map ringbuffer.\n");
4107 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4108 i915_gem_object_unpin(obj);
4109 drm_gem_object_unreference(obj);
4110 i915_gem_cleanup_hws(dev);
4111 return -EINVAL;
4112 }
4113 ring->ring_obj = obj;
4114 ring->virtual_start = ring->map.handle;
4116 /* Stop the ring if it's running. */
4117 I915_WRITE(PRB0_CTL, 0);
4118 I915_WRITE(PRB0_TAIL, 0);
4119 I915_WRITE(PRB0_HEAD, 0);
4121 /* Initialize the ring. */
4122 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
4123 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4125 /* G45 ring initialization fails to reset head to zero */
4126 if (head != 0) {
4127 DRM_ERROR("Ring head not reset to zero "
4128 "ctl %08x head %08x tail %08x start %08x\n",
4129 I915_READ(PRB0_CTL),
4130 I915_READ(PRB0_HEAD),
4131 I915_READ(PRB0_TAIL),
4132 I915_READ(PRB0_START));
4133 I915_WRITE(PRB0_HEAD, 0);
4135 DRM_ERROR("Ring head forced to zero "
4136 "ctl %08x head %08x tail %08x start %08x\n",
4137 I915_READ(PRB0_CTL),
4138 I915_READ(PRB0_HEAD),
4139 I915_READ(PRB0_TAIL),
4140 I915_READ(PRB0_START));
4141 }
4143 I915_WRITE(PRB0_CTL,
4144 ((obj->size - 4096) & RING_NR_PAGES) |
4145 RING_NO_REPORT |
4146 RING_VALID);
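/* Note: the control register appears to encode the ring length in
 * 4KB pages; since obj->size is page aligned, (obj->size - 4096)
 * masked with RING_NR_PAGES yields that field, and RING_VALID
 * enables the ring.
 */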
4148 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4150 /* If the head is still not zero, the ring is dead */
4151 if (head != 0) {
4152 DRM_ERROR("Ring initialization failed "
4153 "ctl %08x head %08x tail %08x start %08x\n",
4154 I915_READ(PRB0_CTL),
4155 I915_READ(PRB0_HEAD),
4156 I915_READ(PRB0_TAIL),
4157 I915_READ(PRB0_START));
4158 return -EIO;
4159 }
4161 /* Update our cache of the ring state */
4162 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4163 i915_kernel_lost_context(dev);
4164 else {
4165 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4166 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
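/* Note: free space is the gap from tail back around to head; the
 * extra 8 bytes keep tail from ever catching head, so an empty ring
 * stays distinguishable from a full one.
 */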
4167 ring->space = ring->head - (ring->tail + 8);
4168 if (ring->space < 0)
4169 ring->space += ring->Size;
4170 }
4172 return 0;
4173 }
4175 void
4176 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4177 {
4178 drm_i915_private_t *dev_priv = dev->dev_private;
4180 if (dev_priv->ring.ring_obj == NULL)
4181 return;
4183 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4185 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4186 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4187 dev_priv->ring.ring_obj = NULL;
4188 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4190 i915_gem_cleanup_hws(dev);
4191 }
4193 int
4194 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4195 struct drm_file *file_priv)
4196 {
4197 drm_i915_private_t *dev_priv = dev->dev_private;
4198 int ret;
4200 if (drm_core_check_feature(dev, DRIVER_MODESET))
4201 return 0;
4203 if (dev_priv->mm.wedged) {
4204 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4205 dev_priv->mm.wedged = 0;
4206 }
4208 mutex_lock(&dev->struct_mutex);
4209 dev_priv->mm.suspended = 0;
4211 ret = i915_gem_init_ringbuffer(dev);
4212 if (ret != 0) {
4213 mutex_unlock(&dev->struct_mutex);
4214 return ret;
4215 }
4217 spin_lock(&dev_priv->mm.active_list_lock);
4218 BUG_ON(!list_empty(&dev_priv->mm.active_list));
4219 spin_unlock(&dev_priv->mm.active_list_lock);
4221 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4222 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4223 BUG_ON(!list_empty(&dev_priv->mm.request_list));
4224 mutex_unlock(&dev->struct_mutex);
4226 drm_irq_install(dev);
4228 return 0;
4229 }
4231 int
4232 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4233 struct drm_file *file_priv)
4234 {
4235 if (drm_core_check_feature(dev, DRIVER_MODESET))
4236 return 0;
4238 drm_irq_uninstall(dev);
4239 return i915_gem_idle(dev);
4240 }
4242 void
4243 i915_gem_lastclose(struct drm_device *dev)
4244 {
4245 int ret;
4247 if (drm_core_check_feature(dev, DRIVER_MODESET))
4248 return;
4250 ret = i915_gem_idle(dev);
4251 if (ret)
4252 DRM_ERROR("failed to idle hardware: %d\n", ret);
4253 }
4255 void
4256 i915_gem_load(struct drm_device *dev)
4257 {
4258 int i;
4259 drm_i915_private_t *dev_priv = dev->dev_private;
4261 spin_lock_init(&dev_priv->mm.active_list_lock);
4262 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4263 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4264 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4265 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4266 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4267 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4268 i915_gem_retire_work_handler);
4269 dev_priv->mm.next_gem_seqno = 1;
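/* Note: seqno 0 is treated as "no request" elsewhere in GEM, so the
 * counter starts at 1.
 */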
4271 /* Old X drivers will take 0-2 for front, back, depth buffers */
4272 dev_priv->fence_reg_start = 3;
4274 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4275 dev_priv->num_fence_regs = 16;
4276 else
4277 dev_priv->num_fence_regs = 8;
4279 /* Initialize fence registers to zero */
4280 if (IS_I965G(dev)) {
4281 for (i = 0; i < 16; i++)
4282 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4283 } else {
4284 for (i = 0; i < 8; i++)
4285 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4286 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4287 for (i = 0; i < 8; i++)
4288 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4289 }
4291 i915_gem_detect_bit_6_swizzle(dev);
4292 }
4294 /*
4295 * Create a physically contiguous memory object for this object
4296 * e.g. for cursor + overlay regs
4297 */
4298 int i915_gem_init_phys_object(struct drm_device *dev,
4299 int id, int size)
4300 {
4301 drm_i915_private_t *dev_priv = dev->dev_private;
4302 struct drm_i915_gem_phys_object *phys_obj;
4303 int ret;
4305 if (dev_priv->mm.phys_objs[id - 1] || !size)
4306 return 0;
4308 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4309 if (!phys_obj)
4310 return -ENOMEM;
4312 phys_obj->id = id;
4314 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4315 if (!phys_obj->handle) {
4316 ret = -ENOMEM;
4317 goto kfree_obj;
4318 }
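/* Note (x86 only): mark the pages write-combining so CPU writes to
 * this hardware-scanned memory bypass the cache; undone with
 * set_memory_wb() in i915_gem_free_phys_object() below.
 */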
4319 #ifdef CONFIG_X86
4320 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4321 #endif
4323 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4325 return 0;
4326 kfree_obj:
4327 kfree(phys_obj);
4328 return ret;
4329 }
4331 void i915_gem_free_phys_object(struct drm_device *dev, int id)
4332 {
4333 drm_i915_private_t *dev_priv = dev->dev_private;
4334 struct drm_i915_gem_phys_object *phys_obj;
4336 if (!dev_priv->mm.phys_objs[id - 1])
4337 return;
4339 phys_obj = dev_priv->mm.phys_objs[id - 1];
4340 if (phys_obj->cur_obj) {
4341 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4342 }
4344 #ifdef CONFIG_X86
4345 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4346 #endif
4347 drm_pci_free(dev, phys_obj->handle);
4348 kfree(phys_obj);
4349 dev_priv->mm.phys_objs[id - 1] = NULL;
4350 }
4352 void i915_gem_free_all_phys_object(struct drm_device *dev)
4353 {
4354 int i;
4356 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4357 i915_gem_free_phys_object(dev, i);
4358 }
4360 void i915_gem_detach_phys_object(struct drm_device *dev,
4361 struct drm_gem_object *obj)
4362 {
4363 struct drm_i915_gem_object *obj_priv;
4364 int i;
4365 int ret;
4366 int page_count;
4368 obj_priv = obj->driver_private;
4369 if (!obj_priv->phys_obj)
4370 return;
4372 ret = i915_gem_object_get_pages(obj);
4373 if (ret)
4374 goto out;
4376 page_count = obj->size / PAGE_SIZE;
4378 for (i = 0; i < page_count; i++) {
4379 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
4380 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4382 memcpy(dst, src, PAGE_SIZE);
4383 kunmap_atomic(dst, KM_USER0);
4384 }
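/* Note: the copies above went through kernel mappings, so flush the
 * CPU caches for the pages and the chipset write buffers before the
 * shmem pages are handed back.
 */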
4385 drm_clflush_pages(obj_priv->pages, page_count);
4386 drm_agp_chipset_flush(dev);
4388 i915_gem_object_put_pages(obj);
4389 out:
4390 obj_priv->phys_obj->cur_obj = NULL;
4391 obj_priv->phys_obj = NULL;
4392 }
4394 int
4395 i915_gem_attach_phys_object(struct drm_device *dev,
4396 struct drm_gem_object *obj, int id)
4397 {
4398 drm_i915_private_t *dev_priv = dev->dev_private;
4399 struct drm_i915_gem_object *obj_priv;
4400 int ret = 0;
4401 int page_count;
4402 int i;
4404 if (id > I915_MAX_PHYS_OBJECT)
4405 return -EINVAL;
4407 obj_priv = obj->driver_private;
4409 if (obj_priv->phys_obj) {
4410 if (obj_priv->phys_obj->id == id)
4411 return 0;
4412 i915_gem_detach_phys_object(dev, obj);
4413 }
4416 /* create a new object */
4417 if (!dev_priv->mm.phys_objs[id - 1]) {
4418 ret = i915_gem_init_phys_object(dev, id,
4419 obj->size);
4420 if (ret) {
4421 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4422 goto out;
4423 }
4424 }
4426 /* bind to the object */
4427 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4428 obj_priv->phys_obj->cur_obj = obj;
4430 ret = i915_gem_object_get_pages(obj);
4431 if (ret) {
4432 DRM_ERROR("failed to get page list\n");
4433 goto out;
4434 }
4436 page_count = obj->size / PAGE_SIZE;
4438 for (i = 0; i < page_count; i++) {
4439 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
4440 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4442 memcpy(dst, src, PAGE_SIZE);
4443 kunmap_atomic(src, KM_USER0);
4444 }
4446 i915_gem_object_put_pages(obj);
4448 return 0;
4449 out:
4450 return ret;
4451 }
4453 static int
4454 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4455 struct drm_i915_gem_pwrite *args,
4456 struct drm_file *file_priv)
4457 {
4458 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4459 void *obj_addr;
4460 int ret;
4461 char __user *user_data;
4463 user_data = (char __user *) (uintptr_t) args->data_ptr;
4464 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4466 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
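/* Note: copy_from_user() returns the number of bytes left uncopied
 * rather than an errno, hence any nonzero result maps to -EFAULT.
 */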
4467 ret = copy_from_user(obj_addr, user_data, args->size);
4468 if (ret)
4469 return -EFAULT;
4471 drm_agp_chipset_flush(dev);
4472 return 0;
4475 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4476 {
4477 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4479 /* Clean up our request list when the client is going away, so that
4480 * later retire_requests won't dereference our soon-to-be-gone
4481 * file_priv.
4482 */
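/* Note: this only unlinks each request from the per-file list; the
 * request itself remains on the device-wide request list and is
 * freed by the normal retire path.
 */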
4483 mutex_lock(&dev->struct_mutex);
4484 while (!list_empty(&i915_file_priv->mm.request_list))
4485 list_del_init(i915_file_priv->mm.request_list.next);
4486 mutex_unlock(&dev->struct_mutex);
4487 }