/* [linux-2.6.git] net/ceph/osdmap.c */

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, int state)
{
	int flag = 0;

	if (!len)
		goto done;

	*str = '\0';
	if (state) {
		if (state & CEPH_OSD_EXISTS) {
			snprintf(str, len, "exists");
			flag = 1;
		}
		if (state & CEPH_OSD_UP) {
			snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
				 "up");
			flag = 1;
		}
	} else {
		snprintf(str, len, "doesn't exist");
	}
done:
	return str;
}
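
/*
 * For example (assuming the in-place snprintf append above behaves as
 * intended), ceph_osdmap_state_str(buf, sizeof(buf),
 * CEPH_OSD_EXISTS | CEPH_OSD_UP) yields "exists, up", while a state of
 * 0 yields "doesn't exist".
 */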

/* maps */

static int calc_bits_of(unsigned int t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
	pi->pgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
	pi->lpg_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
	pi->lpgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}
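
/*
 * For illustration: pg_num = 12 gives calc_bits_of(11) = 4 and
 * pg_num_mask = 15; pg_num = 16 gives calc_bits_of(15) = 4 and a mask
 * of 15 as well.  These masks feed ceph_stable_mod() in calc_pg_raw()
 * below, which maps a placement seed onto [0, pg_num) without
 * reshuffling every pg when pg_num changes.
 */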

/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int skip_name_map(void **p, void *end)
{
	int len;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		int strlen;
		*p += sizeof(u32);
		ceph_decode_32_safe(p, end, strlen, bad);
		*p += strlen;
	}
	return 0;
bad:
	return -EINVAL;
}
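
/*
 * All of the decoders above lean on the bounds-checked helpers from
 * <linux/ceph/decode.h>.  Simplified sketch (not the exact kernel
 * macros):
 *
 *	#define ceph_decode_need(p, end, n, bad)		\
 *		do {						\
 *			if (unlikely(*(p) + (n) > (end)))	\
 *				goto bad;			\
 *		} while (0)
 *
 * ceph_decode_32() reads a little-endian u32 at *p and advances the
 * cursor; ceph_decode_32_safe() is ceph_decode_need() followed by
 * ceph_decode_32().
 */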

static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;
	u32 num_name_maps;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */
	for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
		err = skip_name_map(p, end);
		if (err < 0)
			goto done;
	}

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d",
	     c->choose_total_tries);

done:
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}
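
/*
 * Error-path convention used throughout this file: "badmem" falls
 * through into "bad", so a single cleanup (crush_destroy() here) frees
 * whatever was partially decoded, and the ceph_decode_*_safe() macros
 * jump straight to "bad" on a short buffer.
 */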

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	u64 a = *(u64 *)&l;
	u64 b = *(u64 *)&r;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}
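
/*
 * This is the stock kernel rbtree insertion walk: descend to a NULL
 * child slot, rb_link_node() the new node there, then rb_insert_color()
 * to rebalance.  On a duplicate pgid it returns -EEXIST and the caller
 * keeps ownership of @new (see the new_pg_temp loop in
 * osdmap_apply_incremental(), which kfree()s it in that case).
 */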

static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0) {
			n = n->rb_left;
		} else if (c > 0) {
			n = n->rb_right;
		} else {
			dout("__lookup_pg_mapping %llx got %p\n",
			     *(u64 *)&pgid, pg);
			return pg;
		}
	}
	return NULL;
}

static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
{
	struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

	if (pg) {
		dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
		rb_erase(&pg->node, root);
		kfree(pg);
		return 0;
	}
	dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
	return -ENOENT;
}

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	unsigned int n, m;

	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
	calc_pg_masks(pi);

	/* num_snaps * snap_info_t */
	n = le32_to_cpu(pi->v.num_snaps);
	while (n--) {
		ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
				 sizeof(struct ceph_timespec), bad);
		*p += sizeof(u64) +       /* key */
			1 + sizeof(u64) + /* u8, snapid */
			sizeof(struct ceph_timespec);
		m = ceph_decode_32(p);    /* snap name */
		*p += m;
	}

	*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
	return 0;

bad:
	return -EINVAL;
}

static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len, pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout("  pool %d len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout("  name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map);
}

/*
 * adjust max osd value.  reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	struct ceph_entity_addr *addr;
	u32 *weight;

	state = kcalloc(max, sizeof(*state), GFP_NOFS);
	addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
	weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
	if (state == NULL || addr == NULL || weight == NULL) {
		kfree(state);
		kfree(addr);
		kfree(weight);
		return -ENOMEM;
	}

	/* copy old? */
	if (map->osd_state) {
		memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
		memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
		memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
		kfree(map->osd_state);
		kfree(map->osd_addr);
		kfree(map->osd_weight);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	map->max_osd = max;
	return 0;
}
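
/*
 * Note the copy-based growth strategy above: all three new arrays are
 * allocated before anything is modified, so an allocation failure
 * leaves the old map untouched; only then are the old contents copied
 * over and the old arrays freed.
 */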

/*
 * decode a full map.
 */
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	u16 version;
	u32 len, max, i;
	u8 ev;
	int err = -EINVAL;
	void *start = *p;
	struct ceph_pg_pool_info *pi;

	dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (map == NULL)
		return ERR_PTR(-ENOMEM);
	map->pg_temp = RB_ROOT;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_VERSION) {
		pr_warning("got unknown v %d > %d of osdmap\n", version,
			   CEPH_OSDMAP_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	ceph_decode_32_safe(p, end, max, bad);
	while (max--) {
		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
		pi = kzalloc(sizeof(*pi), GFP_NOFS);
		if (!pi)
			goto bad;
		pi->id = ceph_decode_32(p);
		ev = ceph_decode_8(p);  /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			kfree(pi);
			goto bad;
		}
		err = __decode_pool(p, end, pi);
		if (err < 0) {
			kfree(pi);
			goto bad;
		}
		__insert_pg_pool(&map->pg_pools, pi);
	}

	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, bad);

	ceph_decode_32_safe(p, end, map->flags, bad);

	max = ceph_decode_32(p);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err < 0)
		goto bad;
	dout("osdmap_decode max_osd = %d\n", map->max_osd);

	/* osds */
	err = -EINVAL;
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), bad);
	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_state, map->max_osd);

	*p += 4; /* skip length field (should match max) */
	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	for (i = 0; i < len; i++) {
		int n, j;
		struct ceph_pg pgid;
		struct ceph_pg_mapping *pg;

		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		n = ceph_decode_32(p);
		err = -EINVAL;
		if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
			goto bad;
		ceph_decode_need(p, end, n * sizeof(u32), bad);
		err = -ENOMEM;
		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
		if (!pg)
			goto bad;
		pg->pgid = pgid;
		pg->len = n;
		for (j = 0; j < n; j++)
			pg->osds[j] = ceph_decode_32(p);

		err = __insert_pg_mapping(pg, &map->pg_temp);
		if (err)
			goto bad;
		dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, n);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, bad);
	dout("osdmap_decode crush len %d from off 0x%x\n", len,
	     (int)(*p - start));
	ceph_decode_need(p, end, len, bad);
	map->crush = crush_decode(*p, end);
	*p += len;
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}

	/* ignore the rest of the map */
	*p = end;

	dout("osdmap_decode done %p %p\n", *p, end);
	return map;

bad:
	dout("osdmap_decode fail\n");
	ceph_osdmap_destroy(map);
	return ERR_PTR(err);
}

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	u32 len, pool;
	__s32 new_pool_max, new_flags, max;
	void *start = *p;
	int err = -EINVAL;
	u16 version;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_INC_VERSION) {
		pr_warning("got unknown v %d > %d of inc osdmap\n", version,
			   CEPH_OSDMAP_INC_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
			 bad);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_32(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental new crush map len %d, %p to %p\n",
		     len, *p, end);
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush))
			return ERR_CAST(newcrush);
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	ceph_decode_need(p, end, 5*sizeof(u32), bad);

	/* new max? */
	max = ceph_decode_32(p);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err < 0)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		__u8 ev;
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
		ev = ceph_decode_8(p);  /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			goto bad;
		}
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi) {
				err = -ENOMEM;
				goto bad;
			}
			pi->id = pool;
			__insert_pg_pool(&map->pg_pools, pi);
		}
		err = __decode_pool(p, end, pi);
		if (err < 0)
			goto bad;
	}
	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	err = -EINVAL;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_state */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		u8 xorstate;
		ceph_decode_32_safe(p, end, osd, bad);
		xorstate = **(u8 **)p;
		(*p)++;  /* clean flag */
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		if (xorstate & CEPH_OSD_UP)
			pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] ^= xorstate;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
		     off == CEPH_OSD_IN ? "(in)" :
		     (off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_mapping *pg;
		int j;
		struct ceph_pg pgid;
		u32 pglen;
		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		pglen = ceph_decode_32(p);

		if (pglen) {
			ceph_decode_need(p, end, pglen*sizeof(u32), bad);

			/* removing existing (if any) */
			(void) __remove_pg_mapping(&map->pg_temp, pgid);

			/* insert */
			if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
				err = -EINVAL;
				goto bad;
			}
			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
			if (!pg) {
				err = -ENOMEM;
				goto bad;
			}
			pg->pgid = pgid;
			pg->len = pglen;
			for (j = 0; j < pglen; j++)
				pg->osds[j] = ceph_decode_32(p);
			err = __insert_pg_mapping(pg, &map->pg_temp);
			if (err) {
				kfree(pg);
				goto bad;
			}
			dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
			     pglen);
		} else {
			/* remove */
			__remove_pg_mapping(&map->pg_temp, pgid);
		}
	}

	/* ignore the rest */
	*p = end;
	return map;

bad:
	pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
	       epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				  u64 off, u64 *plen,
				  u64 *ono,
				  u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu  osize %u fl_su %u\n", off, *plen,
	     osize, su);
	if (su == 0 || sc == 0)
		goto invalid;
	su_per_object = osize / su;
	if (su_per_object == 0)
		goto invalid;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	if ((su & ~PAGE_MASK) != 0)
		goto invalid;

	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object.  This is the minimum of the full length requested (plen)
	 * or the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, *plen, su - su_offset);
	*plen = *oxlen;

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
	return 0;

invalid:
	dout(" invalid layout\n");
	*ono = 0;
	*oxoff = 0;
	*oxlen = 0;
	return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
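
/*
 * Worked example with illustrative numbers: su = 65536, sc = 4,
 * osize = 262144, so su_per_object = 4.  For off = 1000000:
 *
 *	bl = 1000000 / 65536 = 15
 *	stripeno = 15 / 4 = 3, stripepos = 15 % 4 = 3, objsetno = 3 / 4 = 0
 *	*ono = 0 * 4 + 3 = 3
 *	su_offset = 1000000 % 65536 = 16960
 *	*oxoff = 16960 + (3 % 4) * 65536 = 213568
 *	*oxlen = min(*plen, 65536 - 16960 = 48576)
 *
 * so *plen is clipped to at most one stripe unit per call, as the
 * comment above the function promises.
 */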

/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
int ceph_calc_object_layout(struct ceph_object_layout *ol,
			    const char *oid,
			    struct ceph_file_layout *fl,
			    struct ceph_osdmap *osdmap)
{
	unsigned int num, num_mask;
	struct ceph_pg pgid;
	int poolid = le32_to_cpu(fl->fl_pg_pool);
	struct ceph_pg_pool_info *pool;
	unsigned int ps;

	BUG_ON(!osdmap);

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return -EIO;
	ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
	num = le32_to_cpu(pool->v.pg_num);
	num_mask = pool->pg_num_mask;

	pgid.ps = cpu_to_le16(ps);
	pgid.preferred = cpu_to_le16(-1);
	pgid.pool = fl->fl_pg_pool;
	dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);

	ol->ol_pgid = pgid;
	ol->ol_stripe_unit = fl->fl_object_stripe_unit;
	return 0;
}
EXPORT_SYMBOL(ceph_calc_object_layout);

/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *num)
{
	struct ceph_pg_mapping *pg;
	struct ceph_pg_pool_info *pool;
	int ruleno;
	unsigned int poolid, ps, pps, t;
	int r;

	poolid = le32_to_cpu(pgid.pool);
	ps = le16_to_cpu(pgid.ps);

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return NULL;

	/* pg_temp? */
	t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
			    pool->pgp_num_mask);
	pgid.ps = cpu_to_le16(t);
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		*num = pg->len;
		return pg->osds;
	}

	/* crush */
	ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
				 pool->v.type, pool->v.size);
	if (ruleno < 0) {
		pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
		       poolid, pool->v.crush_ruleset, pool->v.type,
		       pool->v.size);
		return NULL;
	}

	pps = ceph_stable_mod(ps,
			      le32_to_cpu(pool->v.pgp_num),
			      pool->pgp_num_mask);
	pps += poolid;
	r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
			  min_t(int, pool->v.size, *num),
			  osdmap->osd_weight);
	if (r < 0) {
		pr_err("error %d from crush rule: pool %d ruleset %d type %d"
		       " size %d\n", r, poolid, pool->v.crush_ruleset,
		       pool->v.type, pool->v.size);
		return NULL;
	}
	*num = r;
	return osds;
}
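
/*
 * ceph_stable_mod() (from <linux/ceph/ceph_fs.h>) is roughly:
 *
 *	if ((x & bmask) < b)
 *		return x & bmask;
 *	else
 *		return x & (bmask >> 1);
 *
 * a modulo variant that only folds the seeds at or above b, so growing
 * pg_num remaps a minimal number of pgs.  E.g. with b = 12, bmask = 15,
 * seeds 0..11 map to themselves and 12..15 fold to (seed & 7).
 */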

/*
 * Return acting set for given pgid.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *acting)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, o, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	o = 0;
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			acting[o++] = osds[i];
	return o;
}
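
/*
 * Caller sketch (hypothetical): an osd client picking a target would do
 * something like
 *
 *	int acting[CEPH_PG_MAX_SIZE];
 *	int n = ceph_calc_pg_acting(osdmap, pgid, acting);
 *	int primary = (n > 0) ? acting[0] : -1;
 *
 * which matches what ceph_calc_pg_primary() below computes directly.
 */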

/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			return osds[i];
	return -1;
}
EXPORT_SYMBOL(ceph_calc_pg_primary);