/*
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * This file contains the default values for the operation of the
 * Linux VM subsystem.  Fine-tuning documentation can be found in
 * linux/Documentation/sysctl/vm.txt.
 *
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */
#include <linux/mm.h>	/* assumed missing include: struct page, struct zone */
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/mm_inline.h>
#include <linux/prefetch.h>
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(zone, page);
		SetPageActive(page);
		add_page_to_active_list(zone, page);
		KERNEL_STAT_INC(pgactivate);
	}
	spin_unlock_irq(&zone->lru_lock);
}
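/*
 * Illustrative sketch, not part of this file: the typical caller is the
 * mark_page_accessed() style of promotion, which activates a page on
 * its second reference, roughly:
 *
 *	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
 *		activate_page(page);
 *		ClearPageReferenced(page);
 *	} else if (!PageReferenced(page)) {
 *		SetPageReferenced(page);
 *	}
 *
 * The real call site lives elsewhere in the VM; this only shows where
 * activate_page() fits in.
 */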
/*
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
static struct pagevec lru_add_pvecs[NR_CPUS];
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &lru_add_pvecs[get_cpu()];

	page_cache_get(page);	/* __pagevec_lru_add() drops this ref */
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec);
	put_cpu();
}
void lru_add_drain(void)
{
	struct pagevec *pvec = &lru_add_pvecs[get_cpu()];

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);
	put_cpu();
}
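/*
 * Illustrative usage, with hypothetical names: a caller instantiating
 * many pages feeds them through the per-CPU pagevec so zone->lru_lock
 * is taken once per pagevec rather than once per page, then drains
 * whatever is left over:
 *
 *	for (i = 0; i < nr; i++)
 *		lru_cache_add(my_pages[i]);
 *	lru_add_drain();
 *
 * (my_pages and nr are made up for the example.)
 */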
/*
 * This path almost never happens - pages are normally freed via pagevecs.
 */
void __page_cache_release(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);

	spin_lock_irqsave(&zone->lru_lock, flags);
	if (TestClearPageLRU(page))
		del_page_from_lru(zone, page);
	if (page_count(page) != 0)
		page = NULL;	/* someone regrabbed it: don't free */
	spin_unlock_irqrestore(&zone->lru_lock, flags);
	if (page)
		__free_pages_ok(page, 0);
}
/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;

	pagevec_init(&pages_to_free);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		if (PageReserved(page) || !put_page_testzero(page))
			continue;

		pagezone = page_zone(page);
		if (pagezone != zone) {
			/* moving to a new zone: trade lru_locks */
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		if (TestClearPageLRU(page))
			del_page_from_lru(zone, page);
		if (page_count(page) == 0) {
			if (!pagevec_add(&pages_to_free, page)) {
				spin_unlock_irq(&zone->lru_lock);
				pagevec_free(&pages_to_free);
				pagevec_init(&pages_to_free);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);

	pagevec_free(&pages_to_free);
}
void __pagevec_release(struct pagevec *pvec)
{
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_init(pvec);
}
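/*
 * Illustrative usage, with hypothetical names: callers normally gather
 * pages into an on-stack pagevec and let the batched release do the
 * refcounting:
 *
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec);
 *	while ((page = my_next_page()) != NULL)
 *		if (!pagevec_add(&pvec, page))
 *			__pagevec_release(&pvec);
 *	pagevec_release(&pvec);
 *
 * (my_next_page() is made up; pagevec_release() is the drain-if-nonempty
 * wrapper also used at the end of __pagevec_lru_add() below.)
 */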
/*
 * pagevec_release() for pages which are known to not be on the LRU
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
	int i;
	struct pagevec pages_to_free;

	pagevec_init(&pages_to_free);
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		BUG_ON(PageLRU(page));
		if (put_page_testzero(page))
			pagevec_add(&pages_to_free, page);
	}
	pagevec_free(&pages_to_free);
	pagevec_init(pvec);
}
/*
 * Move all the inactive pages to the head of the inactive list
 * and release them.  Reinitialises the caller's pagevec.
 */
void pagevec_deactivate_inactive(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	if (pagevec_count(pvec) == 0)
		return;
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (PageActive(page) || !PageLRU(page))
				BUG();
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		if (!PageActive(page) && PageLRU(page))
			list_move(&page->lru, &pagezone->inactive_list);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	__pagevec_release(pvec);
}
/*
 * Add the passed pages to the inactive_list, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		if (TestSetPageLRU(page))
			BUG();	/* page was already on the LRU */
		add_page_to_inactive_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	pagevec_release(pvec);
}
/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
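/*
 * Worked example, assuming the shift semantics used by the swap
 * readahead code: a cluster is 1 << page_cluster pages, so machines
 * under 16MB swap 4 pages at a time and larger machines swap 8.
 */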