include/linux/pagemap.h
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Page-mapping primitive inline functions
 *
 * Copyright 1995 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <linux/highmem.h>
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT PAGE_SHIFT
#define PAGE_CACHE_SIZE PAGE_SIZE
#define PAGE_CACHE_MASK PAGE_MASK
#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
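/*
 * A quick worked example (assuming the common 4096-byte page size, so
 * PAGE_CACHE_MASK == ~4095): PAGE_CACHE_ALIGN(5000) expands to
 * (5000 + 4095) & ~4095 == 8192, i.e. the address is rounded up to the
 * next page-cache boundary; an already-aligned address is unchanged.
 */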
#define page_cache_get(x) get_page(x)
#define page_cache_alloc() alloc_pages(GFP_HIGHUSER, 0)
#define page_cache_free(x) __free_page(x)
#define page_cache_release(x) __free_page(x)

/*
 * From a kernel address, get the "struct page *"
 */
#define page_cache_entry(x) virt_to_page(x)

extern unsigned int page_hash_bits;
#define PAGE_HASH_BITS (page_hash_bits)
#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)

extern atomic_t page_cache_size; /* # of pages currently in the hash table */
extern struct page **page_hash_table;

extern void page_cache_init(unsigned long);
/*
 * We use a power-of-two hash table to avoid a modulus,
 * and get a reasonable hash by knowing roughly how the
 * inode pointer and indexes are distributed (ie, we
 * roughly know which bits are "significant")
 *
 * For the time being it will work for struct address_space too (most of
 * them sitting inside the inodes). We might want to change it later.
 */
extern inline unsigned long _page_hashfn(struct address_space * mapping, unsigned long index)
{
#define i (((unsigned long) mapping)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
	return s(i+index) & (PAGE_HASH_SIZE-1);
#undef i
#undef s
}
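/*
 * A note on the arithmetic above: (sizeof(struct inode) &
 * ~(sizeof(struct inode) - 1)) is the largest power of two dividing
 * sizeof(struct inode), so the division in 'i' is just a shift that
 * drops low pointer bits carrying little information, and 's' folds
 * the bits above PAGE_HASH_BITS back onto the low bits so the whole
 * value contributes to the bucket index.
 */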
#define page_hash(mapping,index) (page_hash_table+_page_hashfn(mapping,index))

extern struct page * __find_lock_page (struct address_space * mapping,
				unsigned long index, struct page **hash);
extern void lock_page(struct page *page);
#define find_lock_page(mapping, index) \
		__find_lock_page(mapping, index, page_hash(mapping, index))
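/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * wanting exclusive access to a cached page might do
 *
 *	struct page *page = find_lock_page(mapping, index);
 *	if (page) {
 *		... operate on the locked page ...
 *		UnlockPage(page);
 *		page_cache_release(page);
 *	}
 *
 * find_lock_page() returns NULL when nothing is cached at 'index',
 * and otherwise returns the page locked with its count elevated.
 */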
extern void __add_page_to_hash_queue(struct page * page, struct page **p);

extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
extern void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index);

extern inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long index)
{
	__add_page_to_hash_queue(page, page_hash(inode->i_mapping,index));
}
extern inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
{
	struct list_head *head = &mapping->pages;

	if (!mapping->nrpages++) {
		if (!list_empty(head))
			BUG();
	} else {
		if (list_empty(head))
			BUG();
	}
	list_add(&page->list, head);
	page->mapping = mapping;
}
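/*
 * The BUG() checks above assert the invariant that nrpages is zero
 * exactly when the mapping's page list is empty.
 */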
extern inline void remove_page_from_inode_queue(struct page * page)
{
	struct address_space * mapping = page->mapping;

	mapping->nrpages--;
	list_del(&page->list);
}

extern void ___wait_on_page(struct page *);

extern inline void wait_on_page(struct page * page)
{
	if (PageLocked(page))
		___wait_on_page(page);
}
extern struct page * grab_cache_page (struct address_space *, unsigned long);

typedef int filler_t(void *, struct page*);

extern struct page *read_cache_page(struct address_space *, unsigned long,
				filler_t *, void *);
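/*
 * Usage sketch (illustrative only): read_cache_page() is typically
 * handed the address_space's own readpage operation as the filler, e.g.
 *
 *	page = read_cache_page(mapping, index,
 *			(filler_t *)mapping->a_ops->readpage, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *
 * The filler is only invoked when the page is not already cached and
 * up to date.
 */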
#endif