/*
 * Linux 2.3.7pre1 -- include/linux/pagemap.h
 * (git blob 1e0f1265b2bad67c398e2d2e89634e11c43047d5)
 */
1 #ifndef _LINUX_PAGEMAP_H
2 #define _LINUX_PAGEMAP_H
4 #include <asm/system.h>
6 /*
7 * Page-mapping primitive inline functions
9 * Copyright 1995 Linus Torvalds
12 #include <linux/mm.h>
13 #include <linux/fs.h>
15 static inline unsigned long page_address(struct page * page)
17 return PAGE_OFFSET + ((page - mem_map) << PAGE_SHIFT);
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
/* Page-cache granularity: currently exactly one hardware page. */
#define PAGE_CACHE_SHIFT PAGE_SHIFT
#define PAGE_CACHE_SIZE PAGE_SIZE
#define PAGE_CACHE_MASK PAGE_MASK
/* Round addr up to the next page-cache boundary. */
#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

/* Allocate/free a cache page by kernel address (x is an unsigned long). */
#define page_cache_alloc() __get_free_page(GFP_USER)
#define page_cache_free(x) free_page(x)
/* Release a cache page by descriptor (x is a struct page *). */
#define page_cache_release(x) __free_page(x)

/*
 * From a kernel address, get the "struct page *"
 */
#define page_cache_entry(x) (mem_map + MAP_NR(x))
/* Page-cache hash table: 2^16 buckets, kept power-of-two so the hash
 * function can mask instead of taking a modulus. */
#define PAGE_HASH_BITS 16
#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)

extern atomic_t page_cache_size; /* # of pages currently in the hash table */
extern struct page * page_hash_table[PAGE_HASH_SIZE];
49 * We use a power-of-two hash table to avoid a modulus,
50 * and get a reasonable hash by knowing roughly how the
51 * inode pointer and offsets are distributed (ie, we
52 * roughly know which bits are "significant")
54 static inline unsigned long _page_hashfn(struct inode * inode, unsigned long offset)
56 #define i (((unsigned long) inode)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
57 #define o (offset >> PAGE_SHIFT)
58 #define s(x) ((x)+((x)>>PAGE_HASH_BITS))
59 return s(i+o) & (PAGE_HASH_SIZE-1);
60 #undef i
61 #undef o
62 #undef s
/* Address of the hash-table bucket for this (inode, offset) pair. */
#define page_hash(inode,offset) (page_hash_table+_page_hashfn(inode,offset))

/*
 * Page-cache lookups.  The third argument of the __ variants is the
 * head of the hash chain to search, precomputed by the wrapper macros
 * below.  Presumably __find_get_page takes a reference on the found
 * page and __find_lock_page additionally locks it (named after get/
 * lock) -- confirm against the definitions in mm/filemap.c.
 */
extern struct page * __find_get_page (struct inode * inode,
unsigned long offset, struct page *page);
#define find_get_page(inode, offset) \
__find_get_page(inode, offset, *page_hash(inode, offset))
extern struct page * __find_lock_page (struct inode * inode,
unsigned long offset, struct page *page);
#define find_lock_page(inode, offset) \
__find_lock_page(inode, offset, *page_hash(inode, offset))
extern void __add_page_to_hash_queue(struct page * page, struct page **p);

extern int add_to_page_cache_unique(struct page * page, struct inode * inode, unsigned long offset, struct page **hash);

/*
 * Insert @page into the page-cache hash table, on the chain selected
 * by (@inode, @offset).  Thin wrapper that computes the bucket and
 * delegates to the out-of-line __add_page_to_hash_queue().
 *
 * (Extraction of this file dropped the function braces; restored here.)
 */
static inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long offset)
{
	__add_page_to_hash_queue(page, page_hash(inode,offset));
}
85 static inline void remove_page_from_inode_queue(struct page * page)
87 struct inode * inode = page->inode;
89 inode->i_nrpages--;
90 if (inode->i_pages == page)
91 inode->i_pages = page->next;
92 if (page->next)
93 page->next->prev = page->prev;
94 if (page->prev)
95 page->prev->next = page->next;
96 page->next = NULL;
97 page->prev = NULL;
100 static inline void add_page_to_inode_queue(struct inode * inode, struct page * page)
102 struct page **p = &inode->i_pages;
104 inode->i_nrpages++;
105 page->inode = inode;
106 page->prev = NULL;
107 if ((page->next = *p) != NULL)
108 page->next->prev = page;
109 *p = page;
extern void ___wait_on_page(struct page *);

/*
 * Wait until @page is no longer locked.  The cheap PageLocked() test
 * is done inline; only the uncommon locked case falls through to the
 * out-of-line sleeping path in ___wait_on_page().
 *
 * (Extraction of this file dropped the function braces; restored here.)
 */
static inline void wait_on_page(struct page * page)
{
	if (PageLocked(page))
		___wait_on_page(page);
}

extern void update_vm_cache(struct inode *, unsigned long, const char *, int);
123 #endif