new pci init code
[linux-2.6/history.git] / mm / swap.c
blob516a547d524711d15368799cd95f2a1d402e4ba6
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem.  Fine-tuning documentation can be found in
 * linux/Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */
16 #include <linux/mm.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/pagevec.h>
21 #include <linux/init.h>
22 #include <linux/mm_inline.h>
23 #include <linux/prefetch.h>
/*
 * How many pages do we try to swap or page in/out together?
 * Tuned at boot by swap_setup() below (2 on small-memory machines, else 3);
 * read elsewhere as a log2 readahead window.
 */
int page_cluster;
29 * FIXME: speed this up?
31 void activate_page(struct page *page)
33 struct zone *zone = page_zone(page);
35 spin_lock_irq(&zone->lru_lock);
36 if (PageLRU(page) && !PageActive(page)) {
37 del_page_from_inactive_list(zone, page);
38 SetPageActive(page);
39 add_page_to_active_list(zone, page);
40 KERNEL_STAT_INC(pgactivate);
42 spin_unlock_irq(&zone->lru_lock);
45 /**
46 * lru_cache_add: add a page to the page lists
47 * @page: the page to add
49 static struct pagevec lru_add_pvecs[NR_CPUS];
51 void lru_cache_add(struct page *page)
53 struct pagevec *pvec = &lru_add_pvecs[get_cpu()];
55 page_cache_get(page);
56 if (!pagevec_add(pvec, page))
57 __pagevec_lru_add(pvec);
58 put_cpu();
61 void lru_add_drain(void)
63 struct pagevec *pvec = &lru_add_pvecs[get_cpu()];
65 if (pagevec_count(pvec))
66 __pagevec_lru_add(pvec);
67 put_cpu();
71 * This path almost never happens - pages are normally freed via pagevecs.
73 void __page_cache_release(struct page *page)
75 unsigned long flags;
76 struct zone *zone = page_zone(page);
78 spin_lock_irqsave(&zone->lru_lock, flags);
79 if (TestClearPageLRU(page))
80 del_page_from_lru(zone, page);
81 if (page_count(page) != 0)
82 page = NULL;
83 spin_unlock_irqrestore(&zone->lru_lock, flags);
84 if (page)
85 __free_pages_ok(page, 0);
89 * Batched page_cache_release(). Decrement the reference count on all the
90 * passed pages. If it fell to zero then remove the page from the LRU and
91 * free it.
93 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
94 * for the remainder of the operation.
96 * The locking in this function is against shrink_cache(): we recheck the
97 * page count inside the lock to see whether shrink_cache grabbed the page
98 * via the LRU. If it did, give up: shrink_cache will free it.
100 void release_pages(struct page **pages, int nr)
102 int i;
103 struct pagevec pages_to_free;
104 struct zone *zone = NULL;
106 pagevec_init(&pages_to_free);
107 for (i = 0; i < nr; i++) {
108 struct page *page = pages[i];
109 struct zone *pagezone;
111 if (PageReserved(page) || !put_page_testzero(page))
112 continue;
114 pagezone = page_zone(page);
115 if (pagezone != zone) {
116 if (zone)
117 spin_unlock_irq(&zone->lru_lock);
118 zone = pagezone;
119 spin_lock_irq(&zone->lru_lock);
121 if (TestClearPageLRU(page))
122 del_page_from_lru(zone, page);
123 if (page_count(page) == 0) {
124 if (!pagevec_add(&pages_to_free, page)) {
125 spin_unlock_irq(&zone->lru_lock);
126 pagevec_free(&pages_to_free);
127 pagevec_init(&pages_to_free);
128 spin_lock_irq(&zone->lru_lock);
132 if (zone)
133 spin_unlock_irq(&zone->lru_lock);
135 pagevec_free(&pages_to_free);
138 void __pagevec_release(struct pagevec *pvec)
140 release_pages(pvec->pages, pagevec_count(pvec));
141 pagevec_init(pvec);
145 * pagevec_release() for pages which are known to not be on the LRU
147 * This function reinitialises the caller's pagevec.
149 void __pagevec_release_nonlru(struct pagevec *pvec)
151 int i;
152 struct pagevec pages_to_free;
154 pagevec_init(&pages_to_free);
155 for (i = 0; i < pagevec_count(pvec); i++) {
156 struct page *page = pvec->pages[i];
158 BUG_ON(PageLRU(page));
159 if (put_page_testzero(page))
160 pagevec_add(&pages_to_free, page);
162 pagevec_free(&pages_to_free);
163 pagevec_init(pvec);
167 * Move all the inactive pages to the head of the inactive list
168 * and release them. Reinitialises the caller's pagevec.
170 void pagevec_deactivate_inactive(struct pagevec *pvec)
172 int i;
173 struct zone *zone = NULL;
175 if (pagevec_count(pvec) == 0)
176 return;
177 for (i = 0; i < pagevec_count(pvec); i++) {
178 struct page *page = pvec->pages[i];
179 struct zone *pagezone = page_zone(page);
181 if (pagezone != zone) {
182 if (PageActive(page) || !PageLRU(page))
183 continue;
184 if (zone)
185 spin_unlock_irq(&zone->lru_lock);
186 zone = pagezone;
187 spin_lock_irq(&zone->lru_lock);
189 if (!PageActive(page) && PageLRU(page))
190 list_move(&page->lru, &pagezone->inactive_list);
192 if (zone)
193 spin_unlock_irq(&zone->lru_lock);
194 __pagevec_release(pvec);
198 * Add the passed pages to the inactive_list, then drop the caller's refcount
199 * on them. Reinitialises the caller's pagevec.
201 void __pagevec_lru_add(struct pagevec *pvec)
203 int i;
204 struct zone *zone = NULL;
206 for (i = 0; i < pagevec_count(pvec); i++) {
207 struct page *page = pvec->pages[i];
208 struct zone *pagezone = page_zone(page);
210 if (pagezone != zone) {
211 if (zone)
212 spin_unlock_irq(&zone->lru_lock);
213 zone = pagezone;
214 spin_lock_irq(&zone->lru_lock);
216 if (TestSetPageLRU(page))
217 BUG();
218 add_page_to_inactive_list(zone, page);
220 if (zone)
221 spin_unlock_irq(&zone->lru_lock);
222 pagevec_release(pvec);
226 * Perform any setup for the swap system
228 void __init swap_setup(void)
230 unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
232 /* Use a smaller cluster for small-memory machines */
233 if (megs < 16)
234 page_cluster = 2;
235 else
236 page_cluster = 3;
238 * Right now other parts of the system means that we
239 * _really_ don't want to cluster much more