/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};
/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @pool:		pointer to the pool which this page belongs to
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; accordingly,
 * there will be 63 (or 62) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
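/*
 * Illustrative arithmetic (a sketch assuming PAGE_SIZE == 4096, i.e.
 * PAGE_SHIFT == 12, and a header that fits in one chunk): CHUNK_SHIFT is
 * 12 - 6 = 6, so CHUNK_SIZE is 64 bytes, TOTAL_CHUNKS is 64 and NCHUNKS
 * is 63. BUDDY_MASK covers the two low handle bits that address the up to
 * three buddies within a page.
 */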
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};
/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	UNDER_RECLAIM
};
/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
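/*
 * E.g. with 64-byte chunks (see the arithmetic sketch above),
 * size_to_chunks(100) is (100 + 63) >> 6 = 2, so a 100-byte object
 * occupies two chunks.
 */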
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
static void compact_page_work(struct work_struct *w);
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(UNDER_RECLAIM, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}
/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}
/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS)
		handle += (bud + zhdr->first_num) & BUDDY_MASK;
	return handle;
}
/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}
/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
	return (handle - zhdr->first_num) & BUDDY_MASK;
}
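/*
 * Handle layout sketch: the z3fold_header sits at the start of its page,
 * so a handle is the page-aligned header address with the buddy id in the
 * low bits. With first_num == 0, FIRST encodes to zhdr + 1, MIDDLE to
 * zhdr + 2 and LAST to zhdr + 3; handle_to_buddy() subtracts first_num
 * back out modulo 4. This is what lets z3fold_compact_page() bump
 * first_num when it turns a middle buddy into a first one: old MIDDLE
 * handles then decode to FIRST, where the data now lives.
 */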
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr->pool;

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}
static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}
static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}
static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	spin_lock(&zhdr->pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&zhdr->pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}
static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}
/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
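/*
 * Worked example (a sketch assuming TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1):
 * a page holding only a 10-chunk middle buddy at start_middle == 20 has
 * nfree_before = 20 - 1 = 19 and nfree_after = 64 - (20 + 10) = 34, so 34
 * free chunks are reported and the page goes on unbuddied[34].
 */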
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		/* new_start: right after 1st chunk */
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
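/*
 * Layout sketch for the cases above (H = header, F/M/L = first/middle/last
 * buddy, . = free chunks):
 *
 *	|H|...|MMM|....|  ->  |H|MMM|.......|   middle becomes the new first
 *	|H|FF|...|MMM|.|  ->  |H|FF|MMM|....|   middle moved next to first
 *	|H|.|MMM|...|LL|  ->  |H|....|MMM|LL|   middle moved next to last
 *
 * In the first case first_num is bumped so that stale MIDDLE handles decode
 * to FIRST, where the data now lives; the other two moves are only worth a
 * memmove() when they open up at least BIG_CHUNK_GAP contiguous chunks.
 */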
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr->pool;
	struct page *page;
	struct list_head *unbuddied;
	int fchunks;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	z3fold_compact_page(zhdr);
	unbuddied = get_cpu_ptr(pool->unbuddied);
	fchunks = num_free_chunks(zhdr);
	if (fchunks < NCHUNKS &&
	    (!zhdr->first_chunks || !zhdr->middle_chunks ||
			!zhdr->last_chunks)) {
		/* the page's not completely free and it's unbuddied */
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[fchunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
	}
	put_cpu_ptr(pool->unbuddied);
	z3fold_page_unlock(zhdr);
}
static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);
	do_compact_page(zhdr, false);
}
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kfree(pool);
out:
	return NULL;
}
/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = 0, i, freechunks;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
		struct list_head *unbuddied;
		chunks = size_to_chunks(size);

lookup:
		/* First, try to find an unbuddied z3fold page. */
		unbuddied = get_cpu_ptr(pool->unbuddied);
		for_each_unbuddied_list(i, chunks) {
			struct list_head *l = &unbuddied[i];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr)
				continue;

			/* Re-check under lock. */
			spin_lock(&pool->lock);
			l = &unbuddied[i];
			if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
					struct z3fold_header, buddy)) ||
			    !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				put_cpu_ptr(pool->unbuddied);
				goto lookup;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				put_cpu_ptr(pool->unbuddied);
				if (can_sleep)
					cond_resched();
				goto lookup;
			}

			/*
			 * this page could not be removed from its unbuddied
			 * list while pool lock was held, and then we've taken
			 * page lock so kref_put could not be called before
			 * we got here, so it's safe to just call kref_get()
			 */
			kref_get(&zhdr->refcount);
			break;
		}
		put_cpu_ptr(pool->unbuddied);

		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto lookup;
			}
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	atomic64_inc(&pool->pages_nr);
	zhdr = init_z3fold_page(page, pool);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
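/*
 * Typical allocation lifecycle, as a sketch (internal API; real users go
 * through zpool, and src/len here stand for caller-provided data):
 *
 *	unsigned long handle;
 *
 *	if (z3fold_alloc(pool, len, GFP_KERNEL, &handle) == 0) {
 *		void *dst = z3fold_map(pool, handle);
 *
 *		memcpy(dst, src, len);
 *		z3fold_unmap(pool, handle);
 *		...
 *		z3fold_free(pool, handle);
 *	}
 */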
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the UNDER_RECLAIM flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* HEADLESS page stored */
		bud = HEADLESS;
	} else {
		z3fold_page_lock(zhdr);
		bud = handle_to_buddy(handle);

		switch (bud) {
		case FIRST:
			zhdr->first_chunks = 0;
			break;
		case MIDDLE:
			zhdr->middle_chunks = 0;
			zhdr->start_middle = 0;
			break;
		case LAST:
			zhdr->last_chunks = 0;
			break;
		default:
			pr_err("%s: unknown bud %d\n", __func__, bud);
			WARN_ON(1);
			z3fold_page_unlock(zhdr);
			return;
		}
	}

	if (bud == HEADLESS) {
		spin_lock(&pool->lock);
		list_del(&page->lru);
		spin_unlock(&pool->lock);
		free_z3fold_page(page);
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(UNDER_RECLAIM, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}
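/*
 * Note on the free path above: compaction is normally deferred to the
 * per-pool workqueue on the CPU the page "belongs" to; it only runs
 * synchronously when the owning CPU is unknown or offline. In both cases
 * the reference taken before compacting keeps the page alive until the
 * compaction work is done with it.
 */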
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the UNDER_RECLAIM flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);
			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				/* candidate found */
				break;

			if (!z3fold_page_trylock(zhdr))
				continue; /* can't evict at this point */
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			set_bit(UNDER_RECLAIM, &page->private);
			break;
		}

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(UNDER_RECLAIM, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked, so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}
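/*
 * Address arithmetic sketch (assuming 64-byte chunks): a FIRST buddy maps
 * right after the header at page + ZHDR_SIZE_ALIGNED, a MIDDLE buddy with
 * start_middle == 20 maps to page + 20 * 64 = page + 1280, and a 10-chunk
 * LAST buddy maps to page + PAGE_SIZE - 640.
 */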
/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}
static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}
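/*
 * z3fold_zpool_evict() above is the z3fold_ops eviction handler installed
 * for zpool-backed pools: it forwards each handle chosen by
 * z3fold_reclaim_page() to the zpool user's own evict callback (e.g. the
 * writeback path of a compressed-swap user such as zswap).
 */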
static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};
static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}
static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}
static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}
static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}
static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}
static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}
static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
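/*
 * With this driver registered, z3fold is reached through the generic zpool
 * API by type name. A usage sketch (exact zpool prototypes vary between
 * kernel versions, so treat this as illustrative):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool", GFP_KERNEL,
 *					     &my_zpool_ops);
 *	unsigned long handle;
 *	int err = zpool_malloc(zp, len, GFP_KERNEL, &handle);
 */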
MODULE_ALIAS("zpool-z3fold");
static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}
static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}
module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");