/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_page_alloc.c 247849 2013-03-05 16:15:34Z kib $
 */
/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#define pr_fmt(fmt) "[TTM] " fmt
#include <sys/eventhandler.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#define NUM_PAGES_TO_ALLOC      (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION        16
#define FREE_ALL_PAGES          (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL      1000
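
/*
 * Sizing note: with 4 KiB pages and 8-byte pointers, NUM_PAGES_TO_ALLOC
 * works out to 4096 / 8 = 512, i.e. exactly one page's worth of page
 * pointers.  That is why the allocation and free paths below process at
 * most NUM_PAGES_TO_ALLOC pages per batch.
 */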
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
        struct lock             lock;
        bool                    fill_lock;
        struct pglist           list;
        int                     ttm_page_alloc_flags;
        unsigned                npages;
        char                    *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
};
/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */
struct ttm_pool_opts {
        unsigned        alloc_size;
        unsigned        max_size;
        unsigned        small;
};
#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is read only object for pool code so it doesn't need locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there are
 * some pages to free.
 * @small_allocation: Limit in number of pages below which an allocation is
 * considered small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
        unsigned int kobj_ref;
        eventhandler_tag lowmem_handler;
        struct ttm_pool_opts    options;

        union {
                struct ttm_page_pool    u_pools[NUM_POOLS];
                struct _utag {
                        struct ttm_page_pool    u_wc_pool;
                        struct ttm_page_pool    u_uc_pool;
                        struct ttm_page_pool    u_wc_pool_dma32;
                        struct ttm_page_pool    u_uc_pool_dma32;
                } _ut;
        } _u;
};

#define pools _u.u_pools
#define wc_pool _u._ut.u_wc_pool
#define uc_pool _u._ut.u_uc_pool
#define wc_pool_dma32 _u._ut.u_wc_pool_dma32
#define uc_pool_dma32 _u._ut.u_uc_pool_dma32
static void
ttm_vm_page_free(struct page *p)
{
        struct vm_page *m = (struct vm_page *)p;

        KASSERT(m->object == NULL, ("ttm page %p is owned", m));
        KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
        KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
        KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
        m->oflags |= VPO_UNMANAGED;
        m->flags &= ~PG_FICTITIOUS;
        vm_page_busy_wait(m, FALSE, "ttmvpf");
        vm_page_unwire(m, 0);
        vm_page_free_contig(m, PAGE_SIZE);
}
static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

        switch (cstate) {
        case tt_uncached:
                return (VM_MEMATTR_UNCACHEABLE);
        case tt_wc:
                return (VM_MEMATTR_WRITE_COMBINING);
        case tt_cached:
                return (VM_MEMATTR_WRITE_BACK);
        }
        panic("caching state %d\n", cstate);
}
static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{

        kfree(m);       /* assumed counterpart of the kzalloc() in ttm_page_alloc_init() */
}
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
                struct attribute *attr, const char *buffer, size_t size)
{
        int chars;
        unsigned val;

        chars = sscanf(buffer, "%u", &val);
        if (chars == 0)
                return size;

        /* Convert kb to number of pages */
        val = val / (PAGE_SIZE >> 10);

        if (attr == &ttm_page_pool_max)
                m->options.max_size = val;
        else if (attr == &ttm_page_pool_small)
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
                        pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
                        pr_warn("Setting allocation size to larger than %lu is not recommended\n",
                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }

        return size;
}
static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
                struct attribute *attr, char *buffer)
{
        unsigned val = 0;

        if (attr == &ttm_page_pool_max)
                val = m->options.max_size;
        else if (attr == &ttm_page_pool_small)
                val = m->options.small;
        else if (attr == &ttm_page_pool_alloc_size)
                val = m->options.alloc_size;

        /* Convert from number of pages to kb */
        val = val * (PAGE_SIZE >> 10);

        return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
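
/*
 * Example of the kb <-> page conversion used by the store/show pair above,
 * assuming 4 KiB pages (PAGE_SIZE >> 10 == 4): writing "8192" stores
 * 8192 / 4 = 2048 pages, and reading the attribute back prints
 * 2048 * 4 = 8192 kb again.  Values below PAGE_SIZE >> 10 kb truncate to 0.
 */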
static struct ttm_pool_manager *_manager;
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
        vm_page_t m;
        int i;

        for (i = 0; i < addrinarray; i++) {
                m = (struct vm_page *)pages[i];
#ifdef TTM_HAS_AGP
                unmap_page_from_agp(pages[i]);
#endif
                pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
        }
        return 0;
}
static int set_pages_array_wc(struct page **pages, int addrinarray)
{
        vm_page_t m;
        int i;

        for (i = 0; i < addrinarray; i++) {
                m = (struct vm_page *)pages[i];
#ifdef TTM_HAS_AGP
                map_page_into_agp(pages[i]);
#endif
                pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
        }
        return 0;
}
static int set_pages_array_uc(struct page **pages, int addrinarray)
{
        vm_page_t m;
        int i;

        for (i = 0; i < addrinarray; i++) {
                m = (struct vm_page *)pages[i];
#ifdef TTM_HAS_AGP
                map_page_into_agp(pages[i]);
#endif
                pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
        }
        return 0;
}
/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
                enum ttm_caching_state cstate)
{
        int pool_index;

        if (cstate == tt_cached)
                return NULL;

        if (cstate == tt_wc)
                pool_index = 0x0;
        else
                pool_index = 0x1;

        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;

        return &_manager->pools[pool_index];
}
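
/*
 * Resulting index layout, matching both the bit logic above and the
 * initialization order in ttm_page_alloc_init(): pools[0] = wc,
 * pools[1] = uc, pools[2] = wc dma32, pools[3] = uc dma32.  Cached
 * (write-back) requests return NULL and bypass the pools entirely.
 */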
/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
        unsigned i;

        /* Our VM handles vm memattr automatically on the page free. */
        if (set_pages_array_wb(pages, npages))
                pr_err("Failed to set %d pages to wb!\n", npages);
        for (i = 0; i < npages; ++i)
                ttm_vm_page_free(pages[i]);
}
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
                unsigned freed_pages)
{
        pool->npages -= freed_pages;
        pool->nfrees += freed_pages;
}
/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
        vm_page_t p, p1;
        struct page **pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;
        unsigned i;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;

        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
            M_TEMP, M_WAITOK | M_ZERO);

restart:
        lockmgr(&pool->lock, LK_EXCLUSIVE);

        TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
                if (freed_pages >= npages_to_free)
                        break;

                pages_to_free[freed_pages++] = (struct page *)p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        for (i = 0; i < freed_pages; i++)
                                TAILQ_REMOVE(&pool->list,
                                    (struct vm_page *)pages_to_free[i], pageq);

                        ttm_pool_update_free_locked(pool, freed_pages);
                        /**
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        lockmgr(&pool->lock, LK_RELEASE);

                        ttm_pages_put(pages_to_free, freed_pages);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* free all so restart the processing */
                        if (nr_free)
                                goto restart;

                        /* Not allowed to fall through or break because
                         * following context is inside spinlock while we are
                         * outside here.
                         */
                        goto out;
                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                for (i = 0; i < freed_pages; i++)
                        TAILQ_REMOVE(&pool->list,
                            (struct vm_page *)pages_to_free[i], pageq);

                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        lockmgr(&pool->lock, LK_RELEASE);

        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
out:
        drm_free(pages_to_free, M_TEMP);
        return nr_free;
}
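
/*
 * Worked example of the batching above: a request to free 1000 pages with
 * NUM_PAGES_TO_ALLOC == 512 fills pages_to_free once, drops the lock while
 * those 512 pages have their caching restored and are freed, then takes the
 * restart path and frees the remaining 488 pages on the second pass,
 * returning 0.
 */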
/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
        unsigned i;
        int total = 0;

        for (i = 0; i < NUM_POOLS; ++i)
                total += _manager->pools[i].npages;

        return total;
}
/**
 * Callback for mm to request pool to reduce number of pages held.
 */
static int ttm_pool_mm_shrink(void *arg)
{
        static unsigned int start_pool = 0;
        unsigned i;
        unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
        struct ttm_page_pool *pool;
        int shrink_pages = 100; /* XXXKIB */

        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
        }
        /* return estimated number of unused pages in pool */
        return ttm_pool_get_num_unused_pages();
}
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

        manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
            ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}
static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

        EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}
static int ttm_set_pages_caching(struct page **pages,
                enum ttm_caching_state cstate, unsigned cpages)
{
        int r = 0;

        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to uc!\n", cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to wc!\n", cpages);
                break;
        default:
                break;
        }
        return r;
}
/**
 * Free the pages that failed to change their caching state. Any pages that
 * already changed their caching state are left on the list to be put back
 * to the pool.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
                int ttm_flags, enum ttm_caching_state cstate,
                struct page **failed_pages, unsigned cpages)
{
        unsigned i;

        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                TAILQ_REMOVE(pages, (struct vm_page *)failed_pages[i], pageq);
                ttm_vm_page_free(failed_pages[i]);
        }
}
/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        struct page **caching_array;
        vm_page_t p;
        int r = 0;
        unsigned i, cpages, aflags;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

        aflags = VM_ALLOC_NORMAL |
            ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
            VM_ALLOC_ZERO : 0);

        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages * sizeof(vm_page_t), M_TEMP,
            M_WAITOK | M_ZERO);

        for (i = 0, cpages = 0; i < count; ++i) {
                p = vm_page_alloc_contig(0,
                    (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
                    VM_MAX_ADDRESS, PAGE_SIZE, 0,
                    1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
                if (!p) {
                        pr_err("Unable to get page %u\n", i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                    cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                            ttm_flags, cstate,
                                            caching_array, cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }
                p->oflags &= ~VPO_UNMANAGED;
                p->flags |= PG_FICTITIOUS;

#ifdef CONFIG_HIGHMEM /* KIB: nop */
                /* gfp flags of highmem page should never be dma32 so we
                 * should be fine in such case
                 */
                if (!PageHighMem(p))
#endif
                {
                        caching_array[cpages++] = (struct page *)p;
                        if (cpages == max_cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                    cstate, cpages);
                                if (r) {
                                        ttm_handle_caching_state_failure(pages,
                                            ttm_flags, cstate,
                                            caching_array, cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }

                TAILQ_INSERT_HEAD(pages, p, pageq);
        }

        if (cpages) {
                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                if (r)
                        ttm_handle_caching_state_failure(pages,
                            ttm_flags, cstate,
                            caching_array, cpages);
        }
out:
        drm_free(caching_array, M_TEMP);

        return r;
}
/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        vm_page_t p;
        int r;
        unsigned cpages = 0;

        /**
         * Only allow one pool fill operation at a time.
         * If pool doesn't have enough pages for the allocation new pages are
         * allocated from outside of pool.
         */
        if (pool->fill_lock)
                return;

        pool->fill_lock = true;

        /* If allocation request is small and there are not enough
         * pages in a pool we fill the pool up first. */
        if (count < _manager->options.small
                && count > pool->npages) {
                struct pglist new_pages;
                unsigned alloc_size = _manager->options.alloc_size;

                /**
                 * Can't change page caching if in irqsave context. We have to
                 * drop the pool->lock.
                 */
                lockmgr(&pool->lock, LK_RELEASE);

                TAILQ_INIT(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
                    ttm_flags, cstate, alloc_size);
                lockmgr(&pool->lock, LK_EXCLUSIVE);

                if (!r) {
                        TAILQ_CONCAT(&pool->list, &new_pages, pageq);
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
                        pr_err("Failed to fill pool (%p)\n", pool);
                        /* If we have any pages left put them to the pool. */
                        TAILQ_FOREACH(p, &new_pages, pageq) {
                                ++cpages;
                        }
                        TAILQ_CONCAT(&pool->list, &new_pages, pageq);
                        pool->npages += cpages;
                }
        }
        pool->fill_lock = false;
}
/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
                struct pglist *pages,
                int ttm_flags,
                enum ttm_caching_state cstate,
                unsigned count)
{
        vm_page_t p;
        unsigned i;

        lockmgr(&pool->lock, LK_EXCLUSIVE);
        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

        if (count >= pool->npages) {
                /* take all pages from the pool */
                TAILQ_CONCAT(pages, &pool->list, pageq);
                count -= pool->npages;
                pool->npages = 0;
                goto out;
        }
        for (i = 0; i < count; i++) {
                p = TAILQ_FIRST(&pool->list);
                TAILQ_REMOVE(&pool->list, p, pageq);
                TAILQ_INSERT_TAIL(pages, p, pageq);
        }
        pool->npages -= count;
        count = 0;
out:
        lockmgr(&pool->lock, LK_RELEASE);
        return count;
}
/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                          enum ttm_caching_state cstate)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        unsigned i;
        struct vm_page *page;

        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
                for (i = 0; i < npages; i++) {
                        if (pages[i]) {
                                ttm_vm_page_free(pages[i]);
                                pages[i] = NULL;
                        }
                }
                return;
        }

        lockmgr(&pool->lock, LK_EXCLUSIVE);
        for (i = 0; i < npages; i++) {
                if (pages[i]) {
                        page = (struct vm_page *)pages[i];
                        TAILQ_INSERT_TAIL(&pool->list, page, pageq);
                        pages[i] = NULL;
                        pool->npages++;
                }
        }
        /* Check that we don't go over the pool limit */
        npages = 0;
        if (pool->npages > _manager->options.max_size) {
                npages = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC number of pages
                 * to reduce calls to set_memory_wb */
                if (npages < NUM_PAGES_TO_ALLOC)
                        npages = NUM_PAGES_TO_ALLOC;
        }
        lockmgr(&pool->lock, LK_RELEASE);
        if (npages)
                ttm_page_pool_free(pool, npages);
}
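
/*
 * Example of the trimming above: with options.max_size == 2000 pages and a
 * pool that has grown to 2100, the overshoot is 100 pages, which is rounded
 * up to NUM_PAGES_TO_ALLOC (512 with 4 KiB pages) so caching attributes are
 * rewritten in one batch instead of many small set_pages_array_wb() calls.
 */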
/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                         enum ttm_caching_state cstate)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct pglist plist;
        struct vm_page *p = NULL;
        int gfp_flags, aflags;
        unsigned count = 0;
        int r;

        aflags = VM_ALLOC_NORMAL |
            ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);

        /* No pool for cached pages */
        if (pool == NULL) {
                for (r = 0; r < npages; ++r) {
                        p = vm_page_alloc_contig(0,
                            (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
                            VM_MAX_ADDRESS, PAGE_SIZE,
                            0, 1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
                        if (!p) {
                                pr_err("Unable to allocate page\n");
                                return -ENOMEM;
                        }
                        p->oflags &= ~VPO_UNMANAGED;
                        p->flags |= PG_FICTITIOUS;
                        pages[r] = (struct page *)p;
                }
                return 0;
        }

        /* combine zero flag to pool flags */
        gfp_flags = flags | pool->ttm_page_alloc_flags;

        /* First we take pages from the pool */
        TAILQ_INIT(&plist);
        npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
        TAILQ_FOREACH(p, &plist, pageq) {
                pages[count++] = (struct page *)p;
        }

        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                TAILQ_FOREACH(p, &plist, pageq) {
                        pmap_zero_page(VM_PAGE_TO_PHYS(p));
                }
        }

        /* If pool didn't have enough pages allocate new one. */
        if (npages > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 */
                TAILQ_INIT(&plist);
                r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
                    npages);
                TAILQ_FOREACH(p, &plist, pageq) {
                        pages[count++] = (struct page *)p;
                }
                if (r) {
                        /* If there is any pages in the list put them back to
                         * the pool. */
                        pr_err("Failed to allocate extra pages for large request\n");
                        ttm_put_pages(pages, count, flags, cstate);
                        return r;
                }
        }

        return 0;
}
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
                char *name)
{

        lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
        pool->fill_lock = false;
        TAILQ_INIT(&pool->list);
        pool->npages = pool->nfrees = 0;
        pool->ttm_page_alloc_flags = flags;
        pool->name = name;
}
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{

        pr_info("Initializing pool allocator\n");

        _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);

        ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
        ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
            TTM_PAGE_FLAG_DMA32, "wc dma");
        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
            TTM_PAGE_FLAG_DMA32, "uc dma");

        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

        refcount_init(&_manager->kobj_ref, 1);
        ttm_pool_mm_shrink_init(_manager);

        return 0;
}
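
/*
 * In the upstream Linux TTM this initializer is called from
 * ttm_mem_global_init() with the kernel zone size as @max_pages; the BSD
 * port is assumed to be wired up the same way.  A minimal sketch of the
 * expected call from the memory-global setup path:
 *
 *      ttm_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
 */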
void ttm_page_alloc_fini(void)
{
        int i;

        pr_info("Finalizing pool allocator\n");
        ttm_pool_mm_shrink_fini(_manager);

        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

        if (refcount_release(&_manager->kobj_ref))
                ttm_pool_kobj_release(_manager);
        _manager = NULL;
}
int ttm_pool_populate(struct ttm_tt *ttm)
{
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        unsigned i;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_get_pages(&ttm->pages[i], 1,
                                    ttm->page_flags,
                                    ttm->caching_state);
                if (ret != 0) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }

                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                false, false);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }
        }

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return ret;
                }
        }

        ttm->state = tt_unbound;
        return 0;
}
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
        unsigned i;

        for (i = 0; i < ttm->num_pages; ++i) {
                if (ttm->pages[i]) {
                        ttm_mem_global_free_page(ttm->glob->mem_glob,
                                                 ttm->pages[i]);
                        ttm_put_pages(&ttm->pages[i], 1,
                                      ttm->page_flags,
                                      ttm->caching_state);
                }
        }
        ttm->state = tt_unpopulated;
}
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
        struct ttm_page_pool *p;
        unsigned i;
        char *h[] = {"pool", "refills", "pages freed", "size"};

        if (!_manager) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
        seq_printf(m, "%6s %12s %13s %8s\n",
                        h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager->pools[i];

                seq_printf(m, "%6s %12ld %13ld %8d\n",
                                p->name, p->nrefills,
                                p->nfrees, p->npages);
        }
        return 0;
}
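
/*
 * Sample of the table this prints, reconstructed from the format strings
 * above (the numbers are illustrative only):
 *
 *        pool      refills   pages freed     size
 *          wc           10             0      512
 *          uc            3             0      128
 *      wc dma            0             0        0
 *      uc dma            0             0        0
 */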