#include "ceph_debug.h"

#include <linux/slab.h>
#include <asm/div64.h>

#include "super.h"
#include "osdmap.h"
#include "crush/hash.h"
#include "crush/mapper.h"
#include "decode.h"

char *ceph_osdmap_state_str(char *str, int len, int state)
{
	int flag = 0;

	if (!len)
		goto done;

	*str = '\0';
	if (state) {
		if (state & CEPH_OSD_EXISTS) {
			snprintf(str, len, "exists");
			flag = 1;
		}
		if (state & CEPH_OSD_UP) {
			snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
				 "up");
			flag = 1;
		}
	} else {
		snprintf(str, len, "doesn't exist");
	}
done:
	return str;
}

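/*
 * Illustrative examples (not part of the original file): with the
 * reconstruction above, state == (CEPH_OSD_EXISTS | CEPH_OSD_UP)
 * renders as "exists, up", CEPH_OSD_EXISTS alone as "exists", and
 * state == 0 as "doesn't exist".
 */
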
static int calc_bits_of(unsigned t)
{
	int b = 0;
	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
	pi->pgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
	pi->lpg_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
	pi->lpgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}

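/*
 * Worked example (illustrative): for pg_num = 12, calc_bits_of(11)
 * returns 4 (11 -> 5 -> 2 -> 1 -> 0), so pg_num_mask =
 * (1 << 4) - 1 = 15, the smallest 2^n-1 >= 12.  For an exact power
 * of two such as pg_num = 16, calc_bits_of(15) is also 4, giving
 * mask 15 = pg_num - 1.
 */
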
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;
	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;
	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;
	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

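/*
 * Rough sketch of the encoding crush_decode() below consumes, as
 * implied by its decode calls (an inference from this code, not an
 * authoritative wire-format description):
 *
 *	u32 magic (CRUSH_MAGIC)
 *	u32 max_buckets, u32 max_rules, u32 max_devices
 *	max_buckets x { u32 alg; if (alg) bucket header + items +
 *			alg-specific payload handled by the helpers above }
 *	max_rules x { u32 yes; if (yes) rule mask + steps }
 *	trailing name maps (ignored)
 */
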
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned)magic, (unsigned)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
	if (c->device_parents == NULL)
		goto badmem;
	c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
	if (c->bucket_parents == NULL)
		goto badmem;

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				 (struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > ULONG_MAX / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */

	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	u64 a = *(u64 *)&l;
	u64 b = *(u64 *)&r;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return pg;
	}
	return NULL;
}

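/*
 * Usage note (illustrative): pg_temp entries override the CRUSH
 * placement for a pgid while data is being shuffled between osds;
 * calc_pg_raw() below consults __lookup_pg_mapping() first and only
 * falls back to crush_do_rule() when no explicit mapping exists.
 */
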
/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	unsigned n, m;

	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
	calc_pg_masks(pi);

	/* num_snaps * snap_info_t */
	n = le32_to_cpu(pi->v.num_snaps);
	while (n--) {
		ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
				 sizeof(struct ceph_timespec), bad);
		*p += sizeof(u64) +       /* key */
			1 + sizeof(u64) + /* u8, snapid */
			sizeof(struct ceph_timespec);
		m = ceph_decode_32(p);    /* snap name */
		*p += m;
	}

	*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
	return 0;

bad:
	return -EINVAL;
}

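/*
 * Per-snap encoding skipped above, as implied by the pointer
 * arithmetic (an inference from this code, not a wire-format spec):
 * a u64 key, a u8 plus a u64 snapid, a ceph_timespec, then a
 * length-prefixed snap name, followed by num_removed_snap_intervals
 * pairs of u64s.
 */
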
static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len, pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %d len %d\n", pool, len);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			pi->name = kmalloc(len + 1, GFP_NOFS);
			if (pi->name) {
				memcpy(pi->name, *p, len);
				pi->name[len] = '\0';
				dout(" name is %s\n", pi->name);
			}
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map);
}

/*
 * adjust max osd value.  reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	struct ceph_entity_addr *addr;
	u32 *weight;

	state = kcalloc(max, sizeof(*state), GFP_NOFS);
	addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
	weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
	if (state == NULL || addr == NULL || weight == NULL) {
		kfree(state);
		kfree(addr);
		kfree(weight);
		return -ENOMEM;
	}

	/* copy old? */
	if (map->osd_state) {
		memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
		memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
		memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
		kfree(map->osd_state);
		kfree(map->osd_addr);
		kfree(map->osd_weight);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	map->max_osd = max;
	return 0;
}

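/*
 * Illustrative example (not in the original source): growing a map
 * from max_osd = 4 to max = 8 copies the four existing state, weight,
 * and address entries into the new arrays and leaves the new slots
 * zeroed, since the replacements come from kcalloc().
 */
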
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	u16 version;
	u32 len, max, i;
	u8 ev;
	int err = -EINVAL;
	void *start = *p;
	struct ceph_pg_pool_info *pi;

	dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (map == NULL)
		return ERR_PTR(-ENOMEM);
	map->pg_temp = RB_ROOT;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_VERSION) {
		pr_warning("got unknown v %d > %d of osdmap\n", version,
			   CEPH_OSDMAP_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	ceph_decode_32_safe(p, end, max, bad);
	while (max--) {
		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
		pi = kzalloc(sizeof(*pi), GFP_NOFS);
		if (!pi)
			goto bad;
		pi->id = ceph_decode_32(p);
		ev = ceph_decode_8(p); /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			kfree(pi);
			goto bad;
		}
		err = __decode_pool(p, end, pi);
		if (err < 0)
			goto bad;
		__insert_pg_pool(&map->pg_pools, pi);
	}

	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, bad);

	ceph_decode_32_safe(p, end, map->flags, bad);

	max = ceph_decode_32(p);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err < 0)
		goto bad;
	dout("osdmap_decode max_osd = %d\n", map->max_osd);

	/* osds */
	err = -EINVAL;
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), bad);
	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_state, map->max_osd);

	*p += 4; /* skip length field (should match max) */
	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	for (i = 0; i < len; i++) {
		int n, j;
		struct ceph_pg pgid;
		struct ceph_pg_mapping *pg;

		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		n = ceph_decode_32(p);
		ceph_decode_need(p, end, n * sizeof(u32), bad);
		err = -ENOMEM;
		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
		if (!pg)
			goto bad;
		pg->pgid = pgid;
		pg->len = n;
		for (j = 0; j < n; j++)
			pg->osds[j] = ceph_decode_32(p);

		err = __insert_pg_mapping(pg, &map->pg_temp);
		if (err)
			goto bad;
		dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, bad);
	dout("osdmap_decode crush len %d from off 0x%x\n", len,
	     (int)(*p - start));
	ceph_decode_need(p, end, len, bad);
	map->crush = crush_decode(*p, end);
	*p += len;
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}

	/* ignore the rest of the map */
	*p = end;

	dout("osdmap_decode done %p %p\n", *p, end);
	return map;

bad:
	dout("osdmap_decode fail\n");
	ceph_osdmap_destroy(map);
	return ERR_PTR(err);
}

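/*
 * Section order consumed by osdmap_decode() above, inferred from its
 * decode calls (not an authoritative format description): version,
 * fsid/epoch/created/modified, pools, pool names (v5+), pool_max,
 * flags, max_osd plus the osd state/weight/addr arrays, pg_temp
 * mappings, and finally the embedded crush map.
 */
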
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	u32 len, pool;
	__s32 new_pool_max, new_flags, max;
	void *start = *p;
	int err = -EINVAL;
	u16 version;
	struct rb_node *rbp;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_INC_VERSION) {
		pr_warning("got unknown v %d > %d of inc osdmap\n", version,
			   CEPH_OSDMAP_INC_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
			 bad);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_32(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental new crush map len %d, %p to %p\n",
		     len, *p, end);
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush))
			return ERR_CAST(newcrush);
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	ceph_decode_need(p, end, 5*sizeof(u32), bad);

	/* new max? */
	max = ceph_decode_32(p);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err < 0)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		__u8 ev;
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
		ev = ceph_decode_8(p); /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			goto bad;
		}
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi) {
				err = -ENOMEM;
				goto bad;
			}
			pi->id = pool;
			__insert_pg_pool(&map->pg_pools, pi);
		}
		err = __decode_pool(p, end, pi);
		if (err < 0)
			goto bad;
	}
	if (version >= 5 && __decode_pool_names(p, end, map) < 0)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_down */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		ceph_decode_32_safe(p, end, osd, bad);
		(*p)++; /* clean flag */
		pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] &= ~CEPH_OSD_UP;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
		     off == CEPH_OSD_IN ? "(in)" :
		     (off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	rbp = rb_first(&map->pg_temp);
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_mapping *pg;
		int j;
		struct ceph_pg pgid;
		u32 pglen;
		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		ceph_decode_copy(p, &pgid, sizeof(pgid));
		pglen = ceph_decode_32(p);

		/* remove any? */
		while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
						node)->pgid, pgid) <= 0) {
			struct ceph_pg_mapping *cur =
				rb_entry(rbp, struct ceph_pg_mapping, node);

			rbp = rb_next(rbp);
			dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
			rb_erase(&cur->node, &map->pg_temp);
			kfree(cur);
		}

		if (pglen) {
			/* insert */
			ceph_decode_need(p, end, pglen*sizeof(u32), bad);
			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
			if (!pg) {
				err = -ENOMEM;
				goto bad;
			}
			pg->pgid = pgid;
			pg->len = pglen;
			for (j = 0; j < pglen; j++)
				pg->osds[j] = ceph_decode_32(p);
			err = __insert_pg_mapping(pg, &map->pg_temp);
			if (err) {
				kfree(pg);
				goto bad;
			}
			dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
			     pglen);
		}
	}
	while (rbp) {
		struct ceph_pg_mapping *cur =
			rb_entry(rbp, struct ceph_pg_mapping, node);

		rbp = rb_next(rbp);
		dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
		rb_erase(&cur->node, &map->pg_temp);
		kfree(cur);
	}

	/* ignore the rest */
	*p = end;
	return map;

bad:
	pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
	       epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}

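/*
 * Illustrative semantics of the incremental sections handled above,
 * inferred from the code: new_up marks an osd CEPH_OSD_UP and records
 * its address, new_down clears CEPH_OSD_UP, and new_weight stores the
 * raw weight, where CEPH_OSD_IN means fully in and CEPH_OSD_OUT means
 * fully out.
 */
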
/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * based on file layout.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				   u64 off, u64 *plen,
				   u64 *ono,
				   u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
	     osize, su);
	su_per_object = osize / su;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	BUG_ON((su & ~PAGE_MASK) != 0);
	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object. This is the minimum of the full length requested (plen) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, *plen, su - su_offset);
	*plen = *oxlen;

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}

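/*
 * Worked example (illustrative numbers, not from the original file):
 * with su = 65536, sc = 2, osize = 131072 (so su_per_object = 2) and
 * off = 200000: bl = 3, stripeno = 1, stripepos = 1, objsetno = 0,
 * so *ono = 0 * 2 + 1 = 1; su_offset = 200000 % 65536 = 3392 and
 * *oxoff = 3392 + (1 % 2) * 65536 = 68928.  A longer write is
 * clipped to *oxlen = su - su_offset = 62144.
 */
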
/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
int ceph_calc_object_layout(struct ceph_object_layout *ol,
			    const char *oid,
			    struct ceph_file_layout *fl,
			    struct ceph_osdmap *osdmap)
{
	unsigned num, num_mask;
	struct ceph_pg pgid;
	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
	int poolid = le32_to_cpu(fl->fl_pg_pool);
	struct ceph_pg_pool_info *pool;
	unsigned ps;

	BUG_ON(!osdmap);

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return -EIO;
	ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
	if (preferred >= 0) {
		ps += preferred;
		num = le32_to_cpu(pool->v.lpg_num);
		num_mask = pool->lpg_num_mask;
	} else {
		num = le32_to_cpu(pool->v.pg_num);
		num_mask = pool->pg_num_mask;
	}

	pgid.ps = cpu_to_le16(ps);
	pgid.preferred = cpu_to_le16(preferred);
	pgid.pool = fl->fl_pg_pool;
	if (preferred >= 0)
		dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
		     (int)preferred);
	else
		dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);

	ol->ol_pgid = pgid;
	ol->ol_stripe_unit = fl->fl_object_stripe_unit;
	return 0;
}

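/*
 * Illustrative example (hypothetical values, not from the original
 * source): for oid "foo.00000000" in pool 0 with no preferred osd,
 * ps is the object-name hash and the debug line prints a pgid such
 * as "0.7a"; the raw ps is later folded onto an actual PG by
 * ceph_stable_mod() in calc_pg_raw() below.
 */
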
/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *num)
{
	struct ceph_pg_mapping *pg;
	struct ceph_pg_pool_info *pool;
	int ruleno;
	unsigned poolid, ps, pps;
	int preferred;

	/* pg_temp? */
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		*num = pg->len;
		return pg->osds;
	}

	/* crush */
	poolid = le32_to_cpu(pgid.pool);
	ps = le16_to_cpu(pgid.ps);
	preferred = (s16)le16_to_cpu(pgid.preferred);

	/* don't forcefeed bad device ids to crush */
	if (preferred >= osdmap->max_osd ||
	    preferred >= osdmap->crush->max_devices)
		preferred = -1;

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return NULL;
	ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
				 pool->v.type, pool->v.size);
	if (ruleno < 0) {
		pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
		       poolid, pool->v.crush_ruleset, pool->v.type,
		       pool->v.size);
		return NULL;
	}

	if (preferred >= 0)
		pps = ceph_stable_mod(ps,
				      le32_to_cpu(pool->v.lpgp_num),
				      pool->lpgp_num_mask);
	else
		pps = ceph_stable_mod(ps,
				      le32_to_cpu(pool->v.pgp_num),
				      pool->pgp_num_mask);
	*num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
			     min_t(int, pool->v.size, *num),
			     preferred, osdmap->osd_weight);
	return osds;
}

/*
 * Return acting set for given pgid.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *acting)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, o, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	o = 0;
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			acting[o++] = osds[i];
	return o;
}

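/*
 * Usage sketch (illustrative, not from the original file):
 *
 *	int acting[CEPH_PG_MAX_SIZE];
 *	int n = ceph_calc_pg_acting(osdmap, pgid, acting);
 *
 * If the raw set were {3, 1, 7} with osd1 down, n would be 2 and
 * acting would hold {3, 7}; the primary is acting[0].
 */
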
/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			return osds[i];
	return -1;
}