/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
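/*
 * Helpers for flushing the CPU caches of the pages backing a ttm. These
 * are used before the caching state of those pages is changed, so that
 * no dirty cache lines linger for a mapping that is about to change
 * attributes.
 */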
#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}
static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; ++i)
		ttm_tt_clflush_page(*pages++);
	mb();
}
#elif !defined(__powerpc__)
static void ttm_tt_ipi_handler(void *null)
{
	;
}
#endif
void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		unsigned long start = (unsigned long)page_address(pages[i]);

		flush_dcache_range(start, start + PAGE_SIZE);
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}
/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		return alloc_page(GFP_HIGHUSER | __GFP_ZERO);

	return alloc_page(GFP_HIGHUSER);
}
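/*
 * Unpin pages that were obtained with get_user_pages(), transferring
 * dirty state back to the underlying pages where the ttm was mapped
 * writable and has been written to.
 */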
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
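/*
 * Return the page at @index, allocating it first if necessary and
 * accounting it against the global TTM memory limits. Highmem pages are
 * filled in from the top of the page directory, lowmem pages from the
 * bottom.
 */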
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_bo_device *bdev = ttm->bdev;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		if (PageHighMem(p)) {
			ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						   false, false, true);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[--ttm->first_himem_page] = p;
		} else {
			ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						   false, false, false);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[++ttm->last_lomem_page] = p;
		}
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}
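/*
 * Make sure all backing pages are present and hand them to the backend,
 * swapping the ttm contents back in first if necessary.
 */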
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
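/*
 * Per-page caching transition helper. Only x86 can change the caching
 * attributes of the linear kernel map; on other architectures this is
 * a no-op.
 */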
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */
/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}
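/*
 * Map the TTM_PL_FLAG_* caching flags of a placement onto a
 * ttm_caching_state and apply it to the ttm.
 */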
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
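/*
 * Release all kernel-allocated backing pages: return them to cached
 * state first, then free them and unaccount them from the global
 * memory limits.
 */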
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
					    PageHighMem(cur_page));
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
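/*
 * Tear down a ttm completely: destroy the backend, release the pages
 * and the page directory, and drop any remaining swap storage
 * reference.
 */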
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}
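/*
 * Pin @num_pages of user memory starting at @start into the ttm page
 * directory with get_user_pages(). The owning task is recorded so that
 * swapin can re-pin the same range later.
 */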
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}
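/*
 * Allocate and initialize a struct ttm_tt large enough to back @size
 * bytes. A driver would typically call this when it first needs system
 * pages behind a buffer object, along the lines of (a sketch; "bo" is a
 * hypothetical buffer object, @dummy_read_page is driver-provided, and
 * error handling is omitted):
 *
 *	bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 *				page_flags, dummy_read_page);
 */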
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->bdev = bdev;

	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (ttm->pages == NULL) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (ttm->be == NULL) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}
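/*
 * Unbind the ttm from the aperture through the driver backend, leaving
 * the pages populated.
 */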
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}
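/*
 * Populate the ttm if necessary and bind it to the location described
 * by @bo_mem through the driver backend. A typical call site in a
 * driver's move path looks like (a sketch, error handling abbreviated):
 *
 *	ret = ttm_tt_bind(bo->ttm, &bo->mem);
 *	if (unlikely(ret != 0))
 *		return ret;
 */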
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
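/*
 * Copy the ttm contents back in from shmem swap storage (or, for
 * user-backed ttms, re-pin the user pages), then drop the swap storage
 * unless it is persistent.
 */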
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page))
			goto out_err;
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return -ENOMEM;
}
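/*
 * Copy the ttm contents out to shmem swap storage (newly allocated
 * unless @persistant_swap_storage is given) and free the backing pages.
 * User-backed ttms are simply unpinned.
 */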
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return -ENOMEM;
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return -ENOMEM;
}