net/ceph/pagevec.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>
/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const char __user *data,
					  int num_pages, bool write_page)
{
	struct page **pages;
	int got = 0;
	int rc = 0;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	down_read(&current->mm->mmap_sem);
	while (got < num_pages) {
		rc = get_user_pages(current, current->mm,
		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
		    num_pages - got, write_page, 0, pages + got, NULL);
		if (rc < 0)
			break;
		BUG_ON(rc == 0);
		got += rc;
	}
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		goto fail;
	return pages;

fail:
	ceph_put_page_vector(pages, got, false);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);
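
/*
 * Drop the references on a page vector (e.g. one obtained via
 * ceph_get_direct_page_vector()), optionally marking each page dirty
 * first, then free the vector itself.
 */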
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);
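
/*
 * Free a vector of pages allocated with ceph_alloc_page_vector(),
 * along with the vector itself.
 */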
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);
/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
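
/*
 * A minimal usage sketch for the alloc/release pair (illustrative only;
 * "num_pages" is a hypothetical local, not part of this file):
 *
 *	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... fill or read the pages ...
 *	ceph_release_page_vector(pages, num_pages);
 */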
/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
				  const char __user *data,
				  loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
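
/*
 * copy kernel data into a page vector
 */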
int ceph_copy_to_page_vector(struct page **pages,
			     const char *data,
			     loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;
	size_t l;

	while (left > 0) {
		l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);
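
/*
 * copy data from a page vector into a kernel buffer
 */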
int ceph_copy_from_page_vector(struct page **pages,
			       char *data,
			       loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;
	size_t l;

	while (left > 0) {
		l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
/*
 * copy data from a page vector into a user pointer
 */
int ceph_copy_page_vector_to_user(struct page **pages,
				  char __user *data,
				  loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, left, PAGE_CACHE_SIZE-po);
		bad = copy_to_user(data, page_address(pages[i]) + po, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		if (po) {
			po += l - bad;
			if (po == PAGE_CACHE_SIZE)
				po = 0;
		}
		i++;
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_page_vector_to_user);
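
/*
 * Usage sketch for the user-copy helpers (illustrative only; "pages",
 * "ubuf", "len" and "ret" are hypothetical, and the page vector is
 * assumed to cover off+len bytes).  Both helpers return len on success
 * or -EFAULT if a user copy fails outright:
 *
 *	ret = ceph_copy_user_to_page_vector(pages, ubuf, 0, len);
 *	if (ret < 0)
 *		return ret;
 *	ret = ceph_copy_page_vector_to_user(pages, ubuf, 0, len);
 */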
/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);