net/ceph/pagelist.c

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/ceph/pagelist.h>
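
/*
 * A ceph_pagelist buffers arbitrary-length data in a chain of pages.
 * The tail page is kept kmap'd so appends can memcpy straight into it;
 * pl->room tracks how many bytes fit before another page is needed, and
 * pl->free_list holds pages preallocated by ceph_pagelist_reserve().
 */

/* Unmap the currently kmap'd tail page, if any. */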
static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
{
	if (pl->mapped_tail) {
		struct page *page = list_entry(pl->head.prev, struct page, lru);
		kunmap(page);
		pl->mapped_tail = NULL;
	}
}
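
/*
 * Drop a reference; on the final put, unmap the tail, free every page on
 * the list and in the reserve, and free the pagelist itself.
 */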
void ceph_pagelist_release(struct ceph_pagelist *pl)
{
	if (!atomic_dec_and_test(&pl->refcnt))
		return;
	ceph_pagelist_unmap_tail(pl);
	while (!list_empty(&pl->head)) {
		struct page *page = list_first_entry(&pl->head, struct page,
						     lru);
		list_del(&page->lru);
		__free_page(page);
	}
	ceph_pagelist_free_reserve(pl);
	kfree(pl);
}
EXPORT_SYMBOL(ceph_pagelist_release);
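
/*
 * Add one page to the tail of the pagelist, taking it from the reserve
 * when one is available and otherwise allocating with GFP_NOFS.  The new
 * page becomes the kmap'd tail.
 */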
static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
{
	struct page *page;

	if (!pl->num_pages_free) {
		page = __page_cache_alloc(GFP_NOFS);
	} else {
		page = list_first_entry(&pl->free_list, struct page, lru);
		list_del(&page->lru);
		--pl->num_pages_free;
	}
	if (!page)
		return -ENOMEM;
	pl->room += PAGE_SIZE;
	ceph_pagelist_unmap_tail(pl);
	list_add_tail(&page->lru, &pl->head);
	pl->mapped_tail = kmap(page);
	return 0;
}
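
/*
 * Copy len bytes from buf onto the end of the pagelist, filling the
 * current tail page (pl->length & ~PAGE_MASK is the offset within it)
 * and adding new pages as needed.
 */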
int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
{
	while (pl->room < len) {
		size_t bit = pl->room;
		int ret;

		memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK),
		       buf, bit);
		pl->length += bit;
		pl->room -= bit;
		buf += bit;
		len -= bit;
		ret = ceph_pagelist_addpage(pl);
		if (ret)
			return ret;
	}

	memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len);
	pl->length += len;
	pl->room -= len;
	return 0;
}
EXPORT_SYMBOL(ceph_pagelist_append);

/* Allocate enough pages for a pagelist to append the given amount
 * of data without allocating.
 * Returns: 0 on success, -ENOMEM on error.
 */
int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
{
	if (space <= pl->room)
		return 0;
	space -= pl->room;
	space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* conv to num pages */

	while (space > pl->num_pages_free) {
		struct page *page = __page_cache_alloc(GFP_NOFS);
		if (!page)
			return -ENOMEM;
		list_add_tail(&page->lru, &pl->free_list);
		++pl->num_pages_free;
	}
	return 0;
}
EXPORT_SYMBOL(ceph_pagelist_reserve);

/* Free any pages that have been preallocated. */
int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
{
	while (!list_empty(&pl->free_list)) {
		struct page *page = list_first_entry(&pl->free_list,
						     struct page, lru);
		list_del(&page->lru);
		__free_page(page);
		--pl->num_pages_free;
	}
	BUG_ON(pl->num_pages_free);
	return 0;
}
EXPORT_SYMBOL(ceph_pagelist_free_reserve);

/* Create a truncation point. */
void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
			      struct ceph_pagelist_cursor *c)
{
	c->pl = pl;
	c->page_lru = pl->head.prev;
	c->room = pl->room;
}
EXPORT_SYMBOL(ceph_pagelist_set_cursor);

/* Truncate a pagelist to the given point. Move extra pages to reserve.
 * This won't sleep.
 * Returns: 0 on success,
 *          -EINVAL if the pagelist doesn't match the trunc point pagelist
 */
int ceph_pagelist_truncate(struct ceph_pagelist *pl,
			   struct ceph_pagelist_cursor *c)
{
	struct page *page;

	if (pl != c->pl)
		return -EINVAL;
	ceph_pagelist_unmap_tail(pl);
	while (pl->head.prev != c->page_lru) {
		page = list_entry(pl->head.prev, struct page, lru);
		/* move from pagelist to reserve */
		list_move_tail(&page->lru, &pl->free_list);
		++pl->num_pages_free;
	}
	pl->room = c->room;
	if (!list_empty(&pl->head)) {
		page = list_entry(pl->head.prev, struct page, lru);
		pl->mapped_tail = kmap(page);
	}
	return 0;
}
EXPORT_SYMBOL(ceph_pagelist_truncate);
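
/*
 * Example usage (an illustrative sketch only, not part of this file): a
 * typical caller allocates a pagelist, initializes it with
 * ceph_pagelist_init() from <linux/ceph/pagelist.h>, and appends data in
 * pieces; a cursor taken beforehand lets it roll the pagelist back if a
 * later append fails.  ceph_pagelist_reserve() can be called first to
 * preallocate pages so the appends cannot fail with -ENOMEM.  The hdr,
 * payload, and payload_len variables below are hypothetical stand-ins
 * for caller data.
 *
 *	struct ceph_pagelist *pl;
 *	struct ceph_pagelist_cursor cursor;
 *	int ret;
 *
 *	pl = kmalloc(sizeof(*pl), GFP_NOFS);
 *	if (!pl)
 *		return -ENOMEM;
 *	ceph_pagelist_init(pl);
 *
 *	ceph_pagelist_set_cursor(pl, &cursor);
 *	ret = ceph_pagelist_append(pl, &hdr, sizeof(hdr));
 *	if (!ret)
 *		ret = ceph_pagelist_append(pl, payload, payload_len);
 *	if (ret)
 *		ceph_pagelist_truncate(pl, &cursor);
 *
 *	ceph_pagelist_release(pl);
 */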