drivers/md/bcache/movinggc.c

/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

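/*
 * Per-extent state for one move: the closure driving the copy, the keybuf
 * entry being moved, the insert op used to rewrite the data, and the bio
 * shared by the read and write phases.
 */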
struct moving_io {
	struct closure		cl;
	struct keybuf_key	*w;
	struct data_insert_op	op;
	struct bbio		bio;
};

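/*
 * An extent is worth moving if any of its pointers lands in a bucket whose
 * live data has dropped below that cache's gc_move_threshold, as computed by
 * bch_moving_gc().
 */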
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct cache *ca = PTR_CACHE(c, k, i);
		struct bucket *g = PTR_BUCKET(c, k, i);

		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
			return true;
	}

	return false;
}

/* Moving GC - IO loop */

static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	kfree(io);
}

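/*
 * Final step of a move: free the bounce pages, note any collision with a
 * concurrent write, drop the key from the keybuf and release the in-flight
 * slot taken in read_moving().
 */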
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i)
		__free_page(bv->bv_page);

	if (io->op.replace_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

	up(&io->op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}

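/*
 * Read completion: stash any error in the insert op so write_moving() knows
 * to skip the rewrite.
 */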
static void read_moving_endio(struct bio *bio, int error)
{
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (error)
		io->op.error = error;

	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}

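/* (Re)initialize the embedded bio to span the whole extent being moved. */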
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio);
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
					       PAGE_SECTORS);
	bio->bi_private		= &io->cl;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

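/*
 * Write half of the move: reinsert the data with a replace on the original
 * key, so a write that raced with the move wins and the stale copy is simply
 * dropped (replace_collision).
 */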
static void write_moving(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct data_insert_op *op = &io->op;

	if (!op->error) {
		moving_init(io);

		io->bio.bio.bi_sector	= KEY_START(&io->w->key);
		op->write_prio		= 1;
		op->bio			= &io->bio.bio;

		op->writeback		= KEY_DIRTY(&io->w->key);
		op->csum		= KEY_CSUM(&io->w->key);

		bkey_copy(&op->replace_key, &io->w->key);
		op->replace		= true;

		closure_call(&op->cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, write_moving_finish, system_wq);
}

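/*
 * Issue the read; once it completes and drops its closure ref, write_moving()
 * runs out of the workqueue.
 */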
static void read_moving_submit(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, system_wq);
}

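/*
 * Main loop: scan moving_gc_keys for extents that moving_pred() flags, read
 * each one into a freshly allocated moving_io and hand it to write_moving().
 * The moving_in_flight semaphore bounds how many moves are outstanding.
 */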
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->op.inode	= KEY_INODE(&w->key);
		io->op.c	= c;

		moving_init(io);
		bio = &io->bio.bio;

		bio->bi_rw	= READ;
		bio->bi_end_io	= read_moving_endio;

		if (bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		down(&c->moving_in_flight);
		closure_call(&io->cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_sync(&cl);
}

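/* Order buckets by how many live sectors they still contain. */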
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
	return GC_SECTORS_USED(heap_peek(&ca->heap));
}

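/*
 * Decide what to move: for each cache, keep a heap of the emptiest buckets
 * (fullest candidate at the root), pop candidates until the data left in the
 * heap fits in the free-bucket reserve, then use the root's usage as the
 * gc_move_threshold consulted by moving_pred().
 */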
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		unsigned reserve_sectors = ca->sb.bucket_size *
			min(fifo_used(&ca->free), ca->free.size / 2);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		ca->gc_move_threshold = bucket_heap_top(ca);

		pr_debug("threshold %u", ca->gc_move_threshold);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}

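/* One-time setup; moving_in_flight caps concurrent moves at 64. */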
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	sema_init(&c->moving_in_flight, 64);
}