#include "ceph_debug.h"

#include <linux/slab.h>
#include <asm/div64.h>

#include "super.h"
#include "osdmap.h"
#include "crush/hash.h"
#include "crush/mapper.h"
#include "decode.h"

char *ceph_osdmap_state_str(char *str, int len, int state)
{
        int flag = 0;

        if (!len)
                goto done;

        *str = '\0';
        if (state) {
                if (state & CEPH_OSD_EXISTS) {
                        snprintf(str, len, "exists");
                        flag = 1;
                }
                if (state & CEPH_OSD_UP) {
                        snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
                                 "up");
                        flag = 1;
                }
        } else {
                snprintf(str, len, "doesn't exist");
        }
done:
        return str;
}

/* maps */

static int calc_bits_of(unsigned t)
{
        int b = 0;

        while (t) {
                t = t >> 1;
                b++;
        }
        return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
        pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
        pi->pgp_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
        pi->lpg_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
        pi->lpgp_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}
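
/*
 * Worked example: for pg_num = 12, calc_bits_of(11) == 4, so
 * pg_num_mask = (1 << 4) - 1 = 0xf (15), the smallest 2^n-1 >= 12.
 */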

/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
                                       struct crush_bucket_uniform *b)
{
        dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
        ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
        b->item_weight = ceph_decode_32(p);
        return 0;
bad:
        return -EINVAL;
}
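
/*
 * A note on the decode helpers used throughout this file: the
 * ceph_decode_need()/ceph_decode_*_safe() macros check that the buffer
 * between *p and end still holds the requested number of bytes and
 * jump to the given label (here "bad") on short input, so every
 * decoder below bounds-checks before it reads.
 */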

static int crush_decode_list_bucket(void **p, void *end,
                                    struct crush_bucket_list *b)
{
        int j;

        dout("crush_decode_list_bucket %p to %p\n", *p, end);
        b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->item_weights == NULL)
                return -ENOMEM;
        b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->sum_weights == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
        for (j = 0; j < b->h.size; j++) {
                b->item_weights[j] = ceph_decode_32(p);
                b->sum_weights[j] = ceph_decode_32(p);
        }
        return 0;
bad:
        return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
                                    struct crush_bucket_tree *b)
{
        int j;

        dout("crush_decode_tree_bucket %p to %p\n", *p, end);
        ceph_decode_32_safe(p, end, b->num_nodes, bad);
        b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
        if (b->node_weights == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
        for (j = 0; j < b->num_nodes; j++)
                b->node_weights[j] = ceph_decode_32(p);
        return 0;
bad:
        return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
                                     struct crush_bucket_straw *b)
{
        int j;

        dout("crush_decode_straw_bucket %p to %p\n", *p, end);
        b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->item_weights == NULL)
                return -ENOMEM;
        b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->straws == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
        for (j = 0; j < b->h.size; j++) {
                b->item_weights[j] = ceph_decode_32(p);
                b->straws[j] = ceph_decode_32(p);
        }
        return 0;
bad:
        return -EINVAL;
}

static struct crush_map *crush_decode(void *pbyval, void *end)
{
        struct crush_map *c;
        int err = -EINVAL;
        int i, j;
        void **p = &pbyval;
        void *start = pbyval;
        u32 magic;

        dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

        c = kzalloc(sizeof(*c), GFP_NOFS);
        if (c == NULL)
                return ERR_PTR(-ENOMEM);

        ceph_decode_need(p, end, 4*sizeof(u32), bad);
        magic = ceph_decode_32(p);
        if (magic != CRUSH_MAGIC) {
                pr_err("crush_decode magic %x != current %x\n",
                       (unsigned)magic, (unsigned)CRUSH_MAGIC);
                goto bad;
        }
        c->max_buckets = ceph_decode_32(p);
        c->max_rules = ceph_decode_32(p);
        c->max_devices = ceph_decode_32(p);

        c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
        if (c->device_parents == NULL)
                goto badmem;
        c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
        if (c->bucket_parents == NULL)
                goto badmem;

        c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
        if (c->buckets == NULL)
                goto badmem;
        c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
        if (c->rules == NULL)
                goto badmem;

        /* buckets */
        for (i = 0; i < c->max_buckets; i++) {
                int size = 0;
                u32 alg;
                struct crush_bucket *b;

                ceph_decode_32_safe(p, end, alg, bad);
                if (alg == 0) {
                        /* a zero alg marks an unused bucket slot */
                        c->buckets[i] = NULL;
                        continue;
                }
                dout("crush_decode bucket %d off %x %p to %p\n",
                     i, (int)(*p-start), *p, end);

                switch (alg) {
                case CRUSH_BUCKET_UNIFORM:
                        size = sizeof(struct crush_bucket_uniform);
                        break;
                case CRUSH_BUCKET_LIST:
                        size = sizeof(struct crush_bucket_list);
                        break;
                case CRUSH_BUCKET_TREE:
                        size = sizeof(struct crush_bucket_tree);
                        break;
                case CRUSH_BUCKET_STRAW:
                        size = sizeof(struct crush_bucket_straw);
                        break;
                default:
                        err = -EINVAL;
                        goto bad;
                }
                BUG_ON(size == 0);
                b = c->buckets[i] = kzalloc(size, GFP_NOFS);
                if (b == NULL)
                        goto badmem;

                ceph_decode_need(p, end, 4*sizeof(u32), bad);
                b->id = ceph_decode_32(p);
                b->type = ceph_decode_16(p);
                b->alg = ceph_decode_8(p);
                b->hash = ceph_decode_8(p);
                b->weight = ceph_decode_32(p);
                b->size = ceph_decode_32(p);

                dout("crush_decode bucket size %d off %x %p to %p\n",
                     b->size, (int)(*p-start), *p, end);

                b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
                if (b->items == NULL)
                        goto badmem;
                b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
                if (b->perm == NULL)
                        goto badmem;

                ceph_decode_need(p, end, b->size*sizeof(u32), bad);
                for (j = 0; j < b->size; j++)
                        b->items[j] = ceph_decode_32(p);

                switch (b->alg) {
                case CRUSH_BUCKET_UNIFORM:
                        err = crush_decode_uniform_bucket(p, end,
                                  (struct crush_bucket_uniform *)b);
                        if (err < 0)
                                goto bad;
                        break;
                case CRUSH_BUCKET_LIST:
                        err = crush_decode_list_bucket(p, end,
                               (struct crush_bucket_list *)b);
                        if (err < 0)
                                goto bad;
                        break;
                case CRUSH_BUCKET_TREE:
                        err = crush_decode_tree_bucket(p, end,
                                (struct crush_bucket_tree *)b);
                        if (err < 0)
                                goto bad;
                        break;
                case CRUSH_BUCKET_STRAW:
                        err = crush_decode_straw_bucket(p, end,
                                 (struct crush_bucket_straw *)b);
                        if (err < 0)
                                goto bad;
                        break;
                }
        }
265 dout("rule vec is %p\n", c
->rules
);
266 for (i
= 0; i
< c
->max_rules
; i
++) {
268 struct crush_rule
*r
;
270 ceph_decode_32_safe(p
, end
, yes
, bad
);
272 dout("crush_decode NO rule %d off %x %p to %p\n",
273 i
, (int)(*p
-start
), *p
, end
);
278 dout("crush_decode rule %d off %x %p to %p\n",
279 i
, (int)(*p
-start
), *p
, end
);
282 ceph_decode_32_safe(p
, end
, yes
, bad
);
283 #if BITS_PER_LONG == 32
285 if (yes
> ULONG_MAX
/ sizeof(struct crush_rule_step
))
288 r
= c
->rules
[i
] = kmalloc(sizeof(*r
) +
289 yes
*sizeof(struct crush_rule_step
),
293 dout(" rule %d is at %p\n", i
, r
);
295 ceph_decode_copy_safe(p
, end
, &r
->mask
, 4, bad
); /* 4 u8's */
296 ceph_decode_need(p
, end
, r
->len
*3*sizeof(u32
), bad
);
297 for (j
= 0; j
< r
->len
; j
++) {
298 r
->steps
[j
].op
= ceph_decode_32(p
);
299 r
->steps
[j
].arg1
= ceph_decode_32(p
);
300 r
->steps
[j
].arg2
= ceph_decode_32(p
);
304 /* ignore trailing name maps. */
306 dout("crush_decode success\n");
312 dout("crush_decode fail %d\n", err
);

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
        /* compare the raw 64-bit encodings of the two pgids */
        u64 a = *(u64 *)&l;
        u64 b = *(u64 *)&r;

        if (a < b)
                return -1;
        if (a > b)
                return 1;
        return 0;
}

static int __insert_pg_mapping(struct ceph_pg_mapping *new,
                               struct rb_root *root)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct ceph_pg_mapping *pg = NULL;
        int c;

        while (*p) {
                parent = *p;
                pg = rb_entry(parent, struct ceph_pg_mapping, node);
                c = pgid_cmp(new->pgid, pg->pgid);
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
        return 0;
}

static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
                                                   struct ceph_pg pgid)
{
        struct rb_node *n = root->rb_node;
        struct ceph_pg_mapping *pg;
        int c;

        while (n) {
                pg = rb_entry(n, struct ceph_pg_mapping, node);
                c = pgid_cmp(pgid, pg->pgid);
                if (c < 0)
                        n = n->rb_left;
                else if (c > 0)
                        n = n->rb_right;
                else
                        return pg;
        }
        return NULL;
}
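
/*
 * pg_temp mappings recorded in this tree take precedence over CRUSH
 * placement: calc_pg_raw() below consults __lookup_pg_mapping() before
 * falling back to crush_do_rule().
 */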

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct ceph_pg_pool_info *pi = NULL;

        while (*p) {
                parent = *p;
                pi = rb_entry(parent, struct ceph_pg_pool_info, node);
                if (new->id < pi->id)
                        p = &(*p)->rb_left;
                else if (new->id > pi->id)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
        return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
{
        struct ceph_pg_pool_info *pi;
        struct rb_node *n = root->rb_node;

        while (n) {
                pi = rb_entry(n, struct ceph_pg_pool_info, node);
                if (id < pi->id)
                        n = n->rb_left;
                else if (id > pi->id)
                        n = n->rb_right;
                else
                        return pi;
        }
        return NULL;
}

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
        rb_erase(&pi->node, root);
        kfree(pi->name);
        kfree(pi);
}

void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
{
        ceph_decode_copy(p, &pi->v, sizeof(pi->v));
        calc_pg_masks(pi);

        /* skip snapshot metadata we don't use: per-snap entries and
         * the removed-snap intervals (pairs of u64) */
        *p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64);
        *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
}

static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
        struct ceph_pg_pool_info *pi;
        u32 num, len, pool;

        ceph_decode_32_safe(p, end, num, bad);
        dout(" %d pool names\n", num);
        while (num--) {
                ceph_decode_32_safe(p, end, pool, bad);
                ceph_decode_32_safe(p, end, len, bad);
                dout(" pool %d len %d\n", pool, len);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (pi) {
                        kfree(pi->name);
                        pi->name = kmalloc(len + 1, GFP_NOFS);
                        if (pi->name) {
                                memcpy(pi->name, *p, len);
                                pi->name[len] = '\0';
                                dout(" name is %s\n", pi->name);
                        }
                }
                *p += len;
        }
        return 0;

bad:
        return -EINVAL;
}

/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
        dout("osdmap_destroy %p\n", map);
        if (map->crush)
                crush_destroy(map->crush);
        while (!RB_EMPTY_ROOT(&map->pg_temp)) {
                struct ceph_pg_mapping *pg =
                        rb_entry(rb_first(&map->pg_temp),
                                 struct ceph_pg_mapping, node);
                rb_erase(&pg->node, &map->pg_temp);
                kfree(pg);
        }
        while (!RB_EMPTY_ROOT(&map->pg_pools)) {
                struct ceph_pg_pool_info *pi =
                        rb_entry(rb_first(&map->pg_pools),
                                 struct ceph_pg_pool_info, node);
                __remove_pg_pool(&map->pg_pools, pi);
        }
        kfree(map->osd_state);
        kfree(map->osd_weight);
        kfree(map->osd_addr);
        kfree(map);
}

/*
 * adjust max osd value.  reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
        u8 *state;
        struct ceph_entity_addr *addr;
        u32 *weight;

        state = kcalloc(max, sizeof(*state), GFP_NOFS);
        addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
        weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
        if (state == NULL || addr == NULL || weight == NULL) {
                kfree(state);
                kfree(addr);
                kfree(weight);
                return -ENOMEM;
        }

        /* copy old? */
        if (map->osd_state) {
                memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
                memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
                memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
                kfree(map->osd_state);
                kfree(map->osd_addr);
                kfree(map->osd_weight);
        }

        map->osd_state = state;
        map->osd_weight = weight;
        map->osd_addr = addr;
        map->max_osd = max;
        return 0;
}
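
/*
 * Note: the memcpy of map->max_osd entries above assumes the arrays
 * only ever grow (max >= map->max_osd); a shrinking map would need to
 * clamp the copy length.
 */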

/*
 * decode a full map.
 */
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
        struct ceph_osdmap *map;
        u16 version;
        u32 len, max, i;
        u8 ev;
        int err = -EINVAL;
        void *start = *p;
        struct ceph_pg_pool_info *pi;

        dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

        map = kzalloc(sizeof(*map), GFP_NOFS);
        if (map == NULL)
                return ERR_PTR(-ENOMEM);
        map->pg_temp = RB_ROOT;

        ceph_decode_16_safe(p, end, version, bad);
        if (version > CEPH_OSDMAP_VERSION) {
                pr_warning("got unknown v %d > %d of osdmap\n", version,
                           CEPH_OSDMAP_VERSION);
                goto bad;
        }

        ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
        ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
        map->epoch = ceph_decode_32(p);
        ceph_decode_copy(p, &map->created, sizeof(map->created));
        ceph_decode_copy(p, &map->modified, sizeof(map->modified));

        ceph_decode_32_safe(p, end, max, bad);
        while (max--) {
                ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
                pi = kzalloc(sizeof(*pi), GFP_NOFS);
                if (!pi)
                        goto bad;
                pi->id = ceph_decode_32(p);
                ev = ceph_decode_8(p);  /* encoding version */
                if (ev > CEPH_PG_POOL_VERSION) {
                        pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
                                   ev, CEPH_PG_POOL_VERSION);
                        goto bad;
                }
                __decode_pool(p, pi);
                __insert_pg_pool(&map->pg_pools, pi);
        }

        if (version >= 5 && __decode_pool_names(p, end, map) < 0)
                goto bad;

        ceph_decode_32_safe(p, end, map->pool_max, bad);

        ceph_decode_32_safe(p, end, map->flags, bad);

        max = ceph_decode_32(p);

        /* (re)alloc osd arrays */
        err = osdmap_set_max_osd(map, max);
        if (err < 0)
                goto bad;
        dout("osdmap_decode max_osd = %d\n", map->max_osd);

        /* osds */
        err = -EINVAL;
        ceph_decode_need(p, end, 3*sizeof(u32) +
                         map->max_osd*(1 + sizeof(*map->osd_weight) +
                                       sizeof(*map->osd_addr)), bad);
        *p += 4; /* skip length field (should match max) */
        ceph_decode_copy(p, map->osd_state, map->max_osd);

        *p += 4; /* skip length field (should match max) */
        for (i = 0; i < map->max_osd; i++)
                map->osd_weight[i] = ceph_decode_32(p);

        *p += 4; /* skip length field (should match max) */
        ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
        for (i = 0; i < map->max_osd; i++)
                ceph_decode_addr(&map->osd_addr[i]);

        /* pg_temp */
        ceph_decode_32_safe(p, end, len, bad);
        for (i = 0; i < len; i++) {
                int n, j;
                struct ceph_pg pgid;
                struct ceph_pg_mapping *pg;

                ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
                ceph_decode_copy(p, &pgid, sizeof(pgid));
                n = ceph_decode_32(p);
                ceph_decode_need(p, end, n * sizeof(u32), bad);
                err = -ENOMEM;
                pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
                if (!pg)
                        goto bad;
                pg->pgid = pgid;
                pg->len = n;
                for (j = 0; j < n; j++)
                        pg->osds[j] = ceph_decode_32(p);

                err = __insert_pg_mapping(pg, &map->pg_temp);
                if (err)
                        goto bad;
                dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
        }

        /* crush */
        ceph_decode_32_safe(p, end, len, bad);
        dout("osdmap_decode crush len %d from off 0x%x\n", len,
             (int)(*p - start));
        ceph_decode_need(p, end, len, bad);
        map->crush = crush_decode(*p, end);
        *p += len;
        if (IS_ERR(map->crush)) {
                err = PTR_ERR(map->crush);
                map->crush = NULL;
                goto bad;
        }

        /* ignore the rest of the map */
        *p = end;

        dout("osdmap_decode done %p %p\n", *p, end);
        return map;

bad:
        dout("osdmap_decode fail\n");
        ceph_osdmap_destroy(map);
        return ERR_PTR(err);
}

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                                             struct ceph_osdmap *map,
                                             struct ceph_messenger *msgr)
{
        struct crush_map *newcrush = NULL;
        struct ceph_fsid fsid;
        u32 epoch = 0;
        struct ceph_timespec modified;
        u32 len, pool;
        __s32 new_pool_max, new_flags, max;
        void *start = *p;
        int err = -EINVAL;
        u16 version;
        struct rb_node *rbp;

        ceph_decode_16_safe(p, end, version, bad);
        if (version > CEPH_OSDMAP_INC_VERSION) {
                pr_warning("got unknown v %d > %d of inc osdmap\n", version,
                           CEPH_OSDMAP_INC_VERSION);
                goto bad;
        }

        ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
                         bad);
        ceph_decode_copy(p, &fsid, sizeof(fsid));
        epoch = ceph_decode_32(p);
        BUG_ON(epoch != map->epoch+1);
        ceph_decode_copy(p, &modified, sizeof(modified));
        new_pool_max = ceph_decode_32(p);
        new_flags = ceph_decode_32(p);

        /* full map? */
        ceph_decode_32_safe(p, end, len, bad);
        if (len > 0) {
                dout("apply_incremental full map len %d, %p to %p\n",
                     len, *p, end);
                return osdmap_decode(p, min(*p+len, end));
        }

        /* new crush? */
        ceph_decode_32_safe(p, end, len, bad);
        if (len > 0) {
                dout("apply_incremental new crush map len %d, %p to %p\n",
                     len, *p, end);
                newcrush = crush_decode(*p, min(*p+len, end));
                if (IS_ERR(newcrush))
                        return ERR_CAST(newcrush);
                *p += len;
        }

        /* new flags? */
        if (new_flags >= 0)
                map->flags = new_flags;
        if (new_pool_max >= 0)
                map->pool_max = new_pool_max;

        ceph_decode_need(p, end, 5*sizeof(u32), bad);

        /* new max? */
        max = ceph_decode_32(p);
        if (max >= 0) {
                err = osdmap_set_max_osd(map, max);
                if (err < 0)
                        goto bad;
        }

        map->epoch++;
        map->modified = modified;
        if (newcrush) {
                if (map->crush)
                        crush_destroy(map->crush);
                map->crush = newcrush;
                newcrush = NULL;
        }

        /* new_pool */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                __u8 ev;
                struct ceph_pg_pool_info *pi;

                ceph_decode_32_safe(p, end, pool, bad);
                ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
                ev = ceph_decode_8(p);  /* encoding version */
                if (ev > CEPH_PG_POOL_VERSION) {
                        pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
                                   ev, CEPH_PG_POOL_VERSION);
                        goto bad;
                }
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (!pi) {
                        pi = kzalloc(sizeof(*pi), GFP_NOFS);
                        if (!pi) {
                                err = -ENOMEM;
                                goto bad;
                        }
                        pi->id = pool;
                        __insert_pg_pool(&map->pg_pools, pi);
                }
                __decode_pool(p, pi);
        }
        if (version >= 5 && __decode_pool_names(p, end, map) < 0)
                goto bad;

        /* old_pool */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                struct ceph_pg_pool_info *pi;

                ceph_decode_32_safe(p, end, pool, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                if (pi)
                        __remove_pg_pool(&map->pg_pools, pi);
        }

        /* new_up */
        err = -EINVAL;
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                u32 osd;
                struct ceph_entity_addr addr;

                ceph_decode_32_safe(p, end, osd, bad);
                ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
                ceph_decode_addr(&addr);
                pr_info("osd%d up\n", osd);
                BUG_ON(osd >= map->max_osd);
                map->osd_state[osd] |= CEPH_OSD_UP;
                map->osd_addr[osd] = addr;
        }

        /* new_down */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                u32 osd;

                ceph_decode_32_safe(p, end, osd, bad);
                (*p)++;  /* skip the clean flag byte */
                pr_info("osd%d down\n", osd);
                if (osd < map->max_osd)
                        map->osd_state[osd] &= ~CEPH_OSD_UP;
        }

        /* new_weight */
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                u32 osd, off;

                ceph_decode_need(p, end, sizeof(u32)*2, bad);
                osd = ceph_decode_32(p);
                off = ceph_decode_32(p);
                pr_info("osd%d weight 0x%x %s\n", osd, off,
                     off == CEPH_OSD_IN ? "(in)" :
                     (off == CEPH_OSD_OUT ? "(out)" : ""));
                if (osd < map->max_osd)
                        map->osd_weight[osd] = off;
        }

        /* new_pg_temp */
        rbp = rb_first(&map->pg_temp);
        ceph_decode_32_safe(p, end, len, bad);
        while (len--) {
                struct ceph_pg_mapping *pg;
                int j;
                struct ceph_pg pgid;
                u32 pglen;

                ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
                ceph_decode_copy(p, &pgid, sizeof(pgid));
                pglen = ceph_decode_32(p);

                /* remove any? */
                while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
                                                node)->pgid, pgid) <= 0) {
                        struct rb_node *cur = rbp;

                        rbp = rb_next(rbp);
                        dout(" removed pg_temp %llx\n",
                             *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
                                               node)->pgid);
                        rb_erase(cur, &map->pg_temp);
                }

                if (pglen) {
                        /* insert */
                        ceph_decode_need(p, end, pglen*sizeof(u32), bad);
                        pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
                        if (!pg) {
                                err = -ENOMEM;
                                goto bad;
                        }
                        pg->pgid = pgid;
                        pg->len = pglen;
                        for (j = 0; j < pglen; j++)
                                pg->osds[j] = ceph_decode_32(p);
                        err = __insert_pg_mapping(pg, &map->pg_temp);
                        if (err)
                                goto bad;
                        dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
                             pglen);
                }
        }
        while (rbp) {
                struct rb_node *cur = rbp;

                rbp = rb_next(rbp);
                dout(" removed pg_temp %llx\n",
                     *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
                                       node)->pgid);
                rb_erase(cur, &map->pg_temp);
        }

        /* ignore the rest */
        *p = end;
        return map;

bad:
        pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
               epoch, (int)(*p - start), *p, start, end);
        print_hex_dump(KERN_DEBUG, "osdmap: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       start, end - start, true);
        if (newcrush)
                crush_destroy(newcrush);
        return ERR_PTR(err);
}

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
                                   u64 off, u64 *plen,
                                   u64 *ono,
                                   u64 *oxoff, u64 *oxlen)
{
        u32 osize = le32_to_cpu(layout->fl_object_size);
        u32 su = le32_to_cpu(layout->fl_stripe_unit);
        u32 sc = le32_to_cpu(layout->fl_stripe_count);
        u32 bl, stripeno, stripepos, objsetno;
        u32 su_per_object;
        u64 t, su_offset;

        dout("mapping %llu~%llu  osize %u fl_su %u\n", off, *plen,
             osize, su);
        su_per_object = osize / su;
        dout("osize %u / su %u = su_per_object %u\n", osize, su,
             su_per_object);

        BUG_ON((su & ~PAGE_MASK) != 0);
        /* bl = *off / su; */
        t = off;
        do_div(t, su);
        bl = t;
        dout("off %llu / su %u = bl %u\n", off, su, bl);

        stripeno = bl / sc;
        stripepos = bl % sc;
        objsetno = stripeno / su_per_object;

        *ono = objsetno * sc + stripepos;
        dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);

        /* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
        t = off;
        su_offset = do_div(t, su);
        *oxoff = su_offset + (stripeno % su_per_object) * su;

        /*
         * Calculate the length of the extent being written to the selected
         * object.  This is the minimum of the full length requested (plen) or
         * the remainder of the current stripe being written to.
         */
        *oxlen = min_t(u64, *plen, su - su_offset);
        *plen = *oxlen;

        dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}
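
/*
 * Worked example (hypothetical layout): with su = 4096, sc = 2 and
 * osize = 8192 (so su_per_object = 2), an offset of 13000 gives
 * bl = 3, stripeno = 1, stripepos = 1, objsetno = 0, so *ono = 1;
 * su_offset = 712, so *oxoff = 712 + 1*4096 = 4808, and a request of
 * *plen = 10000 is clamped to *oxlen = 4096 - 712 = 3384.
 */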

/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
int ceph_calc_object_layout(struct ceph_object_layout *ol,
                            const char *oid,
                            struct ceph_file_layout *fl,
                            struct ceph_osdmap *osdmap)
{
        unsigned num, num_mask;
        struct ceph_pg pgid;
        s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
        int poolid = le32_to_cpu(fl->fl_pg_pool);
        struct ceph_pg_pool_info *pool;
        unsigned ps;

        BUG_ON(!osdmap);

        pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
        if (!pool)
                return -EIO;
        ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
        if (preferred >= 0) {
                ps += preferred;
                num = le32_to_cpu(pool->v.lpg_num);
                num_mask = pool->lpg_num_mask;
        } else {
                num = le32_to_cpu(pool->v.pg_num);
                num_mask = pool->pg_num_mask;
        }

        pgid.ps = cpu_to_le16(ps);
        pgid.preferred = cpu_to_le16(preferred);
        pgid.pool = fl->fl_pg_pool;
        if (preferred >= 0)
                dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
                     (int)preferred);
        else
                dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);

        ol->ol_pgid = pgid;
        ol->ol_stripe_unit = fl->fl_object_stripe_unit;
        return 0;
}
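
/*
 * The debug output above prints a pgid as "pool.ps" (pool id in
 * decimal, placement seed in hex), with a "p<osd>" suffix when a
 * preferred osd is set.
 */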

/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                        int *osds, int *num)
{
        struct ceph_pg_mapping *pg;
        struct ceph_pg_pool_info *pool;
        int ruleno;
        unsigned poolid, ps, pps;
        int preferred;

        /* pg_temp? */
        pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
        if (pg) {
                *num = pg->len;
                return pg->osds;
        }

        /* crush */
        poolid = le32_to_cpu(pgid.pool);
        ps = le16_to_cpu(pgid.ps);
        preferred = (s16)le16_to_cpu(pgid.preferred);

        /* don't forcefeed bad device ids to crush */
        if (preferred >= osdmap->max_osd ||
            preferred >= osdmap->crush->max_devices)
                preferred = -1;

        pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
        if (!pool)
                return NULL;
        ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
                                 pool->v.type, pool->v.size);
        if (ruleno < 0) {
                pr_err("no crush rule pool %d type %d size %d\n",
                       poolid, pool->v.type, pool->v.size);
                return NULL;
        }

        if (preferred >= 0)
                pps = ceph_stable_mod(ps,
                                      le32_to_cpu(pool->v.lpgp_num),
                                      pool->lpgp_num_mask);
        else
                pps = ceph_stable_mod(ps,
                                      le32_to_cpu(pool->v.pgp_num),
                                      pool->pgp_num_mask);
        *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
                             min_t(int, pool->v.size, *num),
                             preferred, osdmap->osd_weight);
        return osds;
}
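
/*
 * ceph_stable_mod() (a helper defined alongside the osdmap structures)
 * reduces ps into [0, pgp_num) using the bitmasks precomputed by
 * calc_pg_masks(), so placements stay stable while a pool's pg count
 * grows toward the next power of two.
 */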

/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
        int rawosds[10], *osds;
        int i, num = ARRAY_SIZE(rawosds);

        osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
        if (!osds)
                return -1;

        /* primary is first up osd */
        for (i = 0; i < num; i++)
                if (ceph_osd_is_up(osdmap, osds[i]))
                        return osds[i];
        return -1;
}