// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>

#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
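
/*
 * Orientation aid (not from the original source): a session roughly
 * moves NEW -> OPENING -> OPEN, with OPEN degrading to HUNG when
 * renewals go unanswered, and CLOSING / RESTARTING / RECONNECTING /
 * REJECTED covering teardown and recovery; these are the same states
 * reported by ceph_session_state_name() below.  The authoritative
 * transitions are in the message handlers in this file.
 */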

struct ceph_reconnect_state {
	struct ceph_mds_session *session;
	int nr_caps, nr_realms;
	struct ceph_pagelist *pagelist;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);
static void ceph_cap_release_work(struct work_struct *work);
static void ceph_cap_reclaim_work(struct work_struct *work);

static const struct ceph_connection_operations mds_con_ops;

static int parse_reply_info_quota(void **p, void *end,
				  struct ceph_mds_reply_info_in *info)
{
	u8 struct_v, struct_compat;
	u32 struct_len;

	ceph_decode_8_safe(p, end, struct_v, bad);
	ceph_decode_8_safe(p, end, struct_compat, bad);
	/* struct_v is expected to be >= 1. we only
	 * understand encoding with struct_compat == 1. */
	if (!struct_v || struct_compat != 1)
		goto bad;
	ceph_decode_32_safe(p, end, struct_len, bad);
	ceph_decode_need(p, end, struct_len, bad);
	end = *p + struct_len;
	ceph_decode_64_safe(p, end, info->max_bytes, bad);
	ceph_decode_64_safe(p, end, info->max_files, bad);
	*p = end;
	return 0;
bad:
	return -EIO;
}
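
/*
 * Note on the versioned envelope used above (and in several decoders
 * below): one byte struct_v, one byte struct_compat, then a 32-bit
 * struct_len covering the payload.  For the quota block, for example,
 * the wire layout is roughly
 *
 *	u8  struct_v;       // >= 1
 *	u8  struct_compat;  // must be 1 for us to parse it
 *	u32 struct_len;     // 16 for the two u64 fields shown
 *	u64 max_bytes;
 *	u64 max_files;
 *
 * Clamping 'end' to *p + struct_len lets the decoder skip trailing
 * fields added by future versions without miscounting.
 */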

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = 0;
	u8 struct_v = 0;

	if (features == (u64)-1) {
		u32 struct_len;
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding with struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	ceph_decode_copy_safe(p, end, &info->dir_layout,
			      sizeof(info->dir_layout), bad);
	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features == (u64)-1) {
		/* inline data */
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
		/* quota */
		err = parse_reply_info_quota(p, end, info);
		if (err < 0)
			goto out_bad;
		/* pool namespace */
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		if (info->pool_ns_len > 0) {
			ceph_decode_need(p, end, info->pool_ns_len, bad);
			info->pool_ns_data = *p;
			*p += info->pool_ns_len;
		}

		/* btime */
		ceph_decode_need(p, end, sizeof(info->btime), bad);
		ceph_decode_copy(p, &info->btime, sizeof(info->btime));

		/* change attribute */
		ceph_decode_64_safe(p, end, info->change_attr, bad);

		/* dir pin */
		if (struct_v >= 2) {
			ceph_decode_32_safe(p, end, info->dir_pin, bad);
		} else {
			info->dir_pin = -ENODATA;
		}

		/* snapshot birth time, remains zero for v<=2 */
		if (struct_v >= 3) {
			ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
			ceph_decode_copy(p, &info->snap_btime,
					 sizeof(info->snap_btime));
		} else {
			memset(&info->snap_btime, 0, sizeof(info->snap_btime));
		}

		*p = end;
	} else {
		if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
			ceph_decode_64_safe(p, end, info->inline_version, bad);
			ceph_decode_32_safe(p, end, info->inline_len, bad);
			ceph_decode_need(p, end, info->inline_len, bad);
			info->inline_data = *p;
			*p += info->inline_len;
		} else {
			info->inline_version = CEPH_INLINE_NONE;
		}

		if (features & CEPH_FEATURE_MDS_QUOTA) {
			err = parse_reply_info_quota(p, end, info);
			if (err < 0)
				goto out_bad;
		} else {
			info->max_bytes = 0;
			info->max_files = 0;
		}

		info->pool_ns_len = 0;
		info->pool_ns_data = NULL;
		if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
			ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
			if (info->pool_ns_len > 0) {
				ceph_decode_need(p, end, info->pool_ns_len, bad);
				info->pool_ns_data = *p;
				*p += info->pool_ns_len;
			}
		}

		if (features & CEPH_FEATURE_FS_BTIME) {
			ceph_decode_need(p, end, sizeof(info->btime), bad);
			ceph_decode_copy(p, &info->btime, sizeof(info->btime));
			ceph_decode_64_safe(p, end, info->change_attr, bad);
		}

		info->dir_pin = -ENODATA;
		/* info->snap_btime remains zero */
	}
	return 0;
bad:
	err = -EIO;
out_bad:
	return err;
}

static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_dirfrag **dirfrag,
				u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;

		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding whose struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**dirfrag), bad);
	*dirfrag = *p;
	*p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
	if (unlikely(*p > end))
		goto bad;
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

static int parse_reply_info_lease(void **p, void *end,
				  struct ceph_mds_reply_lease **lease,
				  u64 features)
{
	if (features == (u64)-1) {
		u8 struct_v, struct_compat;
		u32 struct_len;

		ceph_decode_8_safe(p, end, struct_v, bad);
		ceph_decode_8_safe(p, end, struct_compat, bad);
		/* struct_v is expected to be >= 1. we only understand
		 * encoding whose struct_compat == 1. */
		if (!struct_v || struct_compat != 1)
			goto bad;
		ceph_decode_32_safe(p, end, struct_len, bad);
		ceph_decode_need(p, end, struct_len, bad);
		end = *p + struct_len;
	}

	ceph_decode_need(p, end, sizeof(**lease), bad);
	*lease = *p;
	*p += sizeof(**lease);
	if (features == (u64)-1)
		*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		err = parse_reply_info_dir(p, end, &info->dirfrag, features);
		if (err < 0)
			goto out_bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;

		err = parse_reply_info_lease(p, end, &info->dlease, features);
		if (err < 0)
			goto out_bad;
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_readdir(void **p, void *end,
				    struct ceph_mds_reply_info_parsed *info,
				    u64 features)
{
	u32 num, i = 0;
	int err;

	err = parse_reply_info_dir(p, end, &info->dir_dir, features);
	if (err < 0)
		goto out_bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	{
		u16 flags = ceph_decode_16(p);
		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
		info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
	}
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_entries);
	if ((unsigned long)(info->dir_entries + num) >
	    (unsigned long)info->dir_entries + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
		/* dentry */
		ceph_decode_32_safe(p, end, rde->name_len, bad);
		ceph_decode_need(p, end, rde->name_len, bad);
		rde->name = *p;
		*p += rde->name_len;
		dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);

		/* dentry lease */
		err = parse_reply_info_lease(p, end, &rde->lease, features);
		if (err)
			goto out_bad;
		/* inode */
		err = parse_reply_info_in(p, end, &rde->inode, features);
		if (err < 0)
			goto out_bad;
		/* ceph_readdir_prepopulate() will update it */
		rde->offset = 0;
		i++;
		num--;
	}

done:
	/* Skip over any unrecognized fields */
	*p = end;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features)
{
	if (features == (u64)-1 ||
	    (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
		/* Malformed reply? */
		if (*p == end) {
			info->has_create_ino = false;
		} else {
			info->has_create_ino = true;
			ceph_decode_64_safe(p, end, info->ino, bad);
		}
	} else {
		if (*p != end)
			goto bad;
	}

	/* Skip over any unrecognized fields */
	*p = end;
	return 0;
bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	u32 op = le32_to_cpu(info->head->op);

	if (op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_readdir(p, end, info, features);
	else if (op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features);
	else
		return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;
	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_entries)
		return;
	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}

const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	case CEPH_MDS_SESSION_REJECTED: return "rejected";
	default: return "???";
	}
}

static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (refcount_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL\n", s);
		return NULL;
	}
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
	if (refcount_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		kfree(s);
	}
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return NULL;
	return get_session(mdsc->sessions[mds]);
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return false;
	else
		return true;
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->m_num_mds)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);

	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds + 1);
		struct ceph_mds_session **sa;

		dout("%s: realloc to %d\n", __func__, newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (!sa)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}

	dout("%s: mds%d\n", __func__, mds);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	spin_lock_init(&s->s_gen_ttl_lock);
	s->s_cap_gen = 1;
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	refcount_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_reconnect = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);

	INIT_LIST_HEAD(&s->s_cap_flushing);

	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
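
/*
 * Worked example for the growth policy above: the sessions[] table is
 * resized to the next power of two that can hold the new rank, so
 * registering mds4 when max_sessions == 4 gives
 * newmax = 1 << get_count_order(5) = 8 -- the table doubles rather
 * than growing one slot at a time.
 */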

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	s->s_state = 0;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		/* avoid calling iput_final() in mds dispatch threads */
		ceph_async_iput(req->r_inode);
	}
	if (req->r_parent)
		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
	ceph_async_iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		ceph_async_iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	WARN_ON_ONCE(!list_empty(&req->r_wait));
	kfree(req);
}

DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
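
/*
 * DEFINE_RB_FUNCS() expands to the lookup_request(), insert_request()
 * and erase_request() helpers used below: an rbtree of
 * struct ceph_mds_request keyed by r_tid and linked through r_node.
 */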

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	int ret = 0;

	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps) {
		ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
					req->r_num_caps);
		if (ret < 0) {
			pr_err("__register_request %p "
			       "failed to reserve caps: %d\n", req, ret);
			/* set req->r_err to fail early from __do_request */
			req->r_err = ret;
			return;
		}
	}
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		ihold(dir);
		req->r_unsafe_dir = dir;
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	/* Never leave an unregistered request on an unsafe list! */
	list_del_init(&req->r_unsafe_item);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		/* avoid calling iput_final() in mds dispatch threads */
		ceph_async_iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}

/*
 * Walk back up the dentry tree until we hit a dentry representing a
 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
 * when calling this) to ensure that the objects won't disappear while we're
 * working with them. Once we hit a candidate dentry, we attempt to take a
 * reference to it, and return that as the result.
 */
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{
	struct inode *inode = NULL;

	while (dentry && !IS_ROOT(dentry)) {
		inode = d_inode_rcu(dentry);
		if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
			break;
		dentry = dentry->d_parent;
	}
	if (inode)
		inode = igrab(inode);
	return inode;
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
			inode = req->r_inode;
			ihold(inode);
		} else {
			/* req->r_dentry is non-null for LSSNAP request */
			rcu_read_lock();
			inode = get_nonsnap_parent(req->r_dentry);
			rcu_read_unlock();
			dout("__choose_mds using snapdir's parent %p\n", inode);
		}
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent;
		struct inode *dir;

		rcu_read_lock();
		parent = READ_ONCE(req->r_dentry->d_parent);
		dir = req->r_parent ? : d_inode_rcu(parent);

		if (!dir || dir->i_sb != mdsc->fsc->sb) {
			/* not this fs or parent went negative */
			inode = d_inode(req->r_dentry);
			if (inode)
				ihold(inode);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			inode = get_nonsnap_parent(parent);
			dout("__choose_mds using nonsnap parent %p\n", inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = dir;
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			} else {
				ihold(inode);
			}
		}
		rcu_read_unlock();
	}

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, mds, (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					goto out;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					goto out;
			}
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		ceph_async_iput(inode);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
out:
	/* avoid calling iput_final() while holding mdsc->mutex or
	 * in mds dispatch threads */
	ceph_async_iput(inode);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}
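
/*
 * In short, the selection order above is: explicit resend hint, then
 * frag-tree placement (a random replica for USE_ANY_MDS, otherwise the
 * authoritative mds for the frag), then whichever session holds a cap
 * on the inode, and finally a random active mds as the fallback.
 */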

static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}

static void encode_supported_features(void **p, void *end)
{
	static const unsigned char bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
	static const size_t count = ARRAY_SIZE(bits);

	if (count > 0) {
		size_t i;
		size_t size = ((size_t)bits[count - 1] + 64) / 64 * 8;

		BUG_ON(*p + 4 + size > end);
		ceph_encode_32(p, size);
		memset(*p, 0, size);
		for (i = 0; i < count; i++)
			((unsigned char*)(*p))[i / 8] |= 1 << (bits[i] % 8);
		*p += size;
	} else {
		BUG_ON(*p + 4 > end);
		ceph_encode_32(p, 0);
	}
}
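
/*
 * Worked example for the sizing above: with a highest supported
 * feature bit of, say, 10, size = ((10 + 64) / 64) * 8 = 8, i.e. one
 * 64-bit word encoded as 8 bytes; a highest bit of 64 would round up
 * to 16 bytes.  The bitmap is length-prefixed so the MDS can decode
 * it regardless of how many words we send.
 */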

/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i = -1;
	int extra_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	void *p, *end;

	const char* metadata[][2] = {
		{"hostname", mdsc->nodename},
		{"kernel_version", init_utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	extra_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0]; ++i) {
		extra_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}
	/* supported feature */
	extra_bytes += 4 + 8;

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	h = p;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(3);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p += sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0]; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	encode_supported_features(&p, end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}
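
/*
 * The metadata map above is serialized the way userspace decodes a
 * map<string, string>: a 32-bit entry count, then for each entry a
 * length-prefixed key and a length-prefixed value.  That is where the
 * "8 + strlen(key) + strlen(val)" in the size calculation comes from;
 * e.g. {"hostname", "node1"} costs 4 + 8 + 4 + 5 = 21 bytes.
 */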

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_num_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}

static void detach_cap_releases(struct ceph_mds_session *session,
				struct list_head *target)
{
	lockdep_assert_held(&session->s_cap_lock);

	list_splice_init(&session->s_cap_releases, target);
	session->s_num_cap_releases = 0;
	dout("detach_cap_releases mds%d\n", session->s_mds);
}

static void dispose_cap_releases(struct ceph_mds_client *mdsc,
				 struct list_head *dispose)
{
	while (!list_empty(dispose)) {
		struct ceph_cap *cap;
		/* pop a cap off the list and release it */
		cap = list_first_entry(dispose, struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;
	struct ceph_inode_info *ci;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		if (req->r_target_inode) {
			/* dropping unsafe change of inode's attributes */
			ci = ceph_inode(req->r_target_inode);
			errseq_set(&ci->i_meta_err, -EIO);
		}
		if (req->r_unsafe_dir) {
			/* dropping unsafe directory operation */
			ci = ceph_inode(req->r_unsafe_dir);
			errseq_set(&ci->i_meta_err, -EIO);
		}
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
int ceph_iterate_session_caps(struct ceph_mds_session *session,
			      int (*cb)(struct inode *, struct ceph_cap *,
					void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			/* avoid calling iput_final() while holding
			 * s_mutex or in mds dispatch threads */
			ceph_async_iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (!cap->ci) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			if (cap->queue_release)
				__ceph_queue_cap_release(session, cap);
			else
				old_cap = cap;  /* put_cap it w/o locks held */
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	ceph_async_iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}

static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	bool dirty_dropped = false;
	bool invalidate = false;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	if (cap->mds_wanted | cap->issued)
		ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;
		struct ceph_mds_client *mdsc = fsc->mdsc;

		if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
			if (inode->i_data.nrpages > 0)
				invalidate = true;
			if (ci->i_wrbuffer_ref > 0)
				mapping_set_error(&inode->i_data, -EIO);
		}

		while (!list_empty(&ci->i_cap_flush_list)) {
			cf = list_first_entry(&ci->i_cap_flush_list,
					      struct ceph_cap_flush, i_list);
			list_move(&cf->i_list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, i_list)
			list_del(&cf->g_list);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			dirty_dropped = true;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			dirty_dropped = true;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (dirty_dropped) {
			errseq_set(&ci->i_meta_err, -EIO);

			if (ci->i_wrbuffer_ref_head == 0 &&
			    ci->i_wr_ref == 0 &&
			    ci->i_dirty_caps == 0 &&
			    ci->i_flushing_caps == 0) {
				ceph_put_snap_context(ci->i_head_snapc);
				ci->i_head_snapc = NULL;
			}
		}

		if (atomic_read(&ci->i_filelock_ref) > 0) {
			/* make further file lock syscall return -EIO */
			ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
			pr_warn_ratelimited(" dropping file locks for %p %lld\n",
					    inode, ceph_ino(inode));
		}

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, i_list);
		list_del(&cf->i_list);
		ceph_free_cap_flush(cf);
	}

	wake_up_all(&ci->i_cap_wq);
	if (invalidate)
		ceph_queue_invalidate(inode);
	iput(inode);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
	struct super_block *sb = fsc->sb;
	LIST_HEAD(dispose);

	dout("remove_session_caps on %p\n", session);
	ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);

	wake_up_all(&fsc->mdsc->cap_flushing_wq);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			/* avoid calling iput_final() while holding s_mutex */
			ceph_async_iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	detach_cap_releases(session, &dispose);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
	spin_unlock(&session->s_cap_lock);
	dispose_cap_releases(session->s_mdsc, &dispose);
}

enum {
	RECONNECT,
	RENEWCAPS,
	FORCE_RO,
};

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned long ev = (unsigned long)arg;

	if (ev == RECONNECT) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	} else if (ev == RENEWCAPS) {
		if (cap->cap_gen < cap->session->s_cap_gen) {
			/* mds did not re-issue stale cap */
			spin_lock(&ci->i_ceph_lock);
			cap->issued = cap->implemented = CEPH_CAP_PIN;
			/* make sure mds knows what we want */
			if (__ceph_caps_file_wanted(ci) & ~cap->mds_wanted)
				ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
			spin_unlock(&ci->i_ceph_lock);
		}
	} else if (ev == FORCE_RO) {
	}
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	ceph_iterate_session_caps(session, wake_up_session_cb,
				  (void *)(unsigned long)ev);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, RENEWCAPS);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}

static bool drop_negative_children(struct dentry *dentry)
{
	struct dentry *child;
	bool all_negative = true;

	if (!d_is_dir(dentry))
		goto out;

	spin_lock(&dentry->d_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
		if (d_really_is_positive(child)) {
			all_negative = false;
			break;
		}
	}
	spin_unlock(&dentry->d_lock);

	if (all_negative)
		shrink_dcache_parent(dentry);
out:
	return all_negative;
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	int *remaining = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (*remaining <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
		/* Note: it's possible that i_filelock_ref becomes non-zero
		 * after dropping auth caps. It doesn't hurt because reply
		 * of lock mds request will re-add auth caps. */
		if (atomic_read(&ci->i_filelock_ref) > 0)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
		(*remaining)--;
	} else {
		struct dentry *dentry;
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		dentry = d_find_any_alias(inode);
		if (dentry && drop_negative_children(dentry)) {
			int count;
			dput(dentry);
			d_prune_aliases(inode);
			count = atomic_read(&inode->i_count);
			if (count == 1)
				(*remaining)--;
			dout("trim_caps_cb %p cap %p pruned, count now %d\n",
			     inode, cap, count);
		} else {
			dput(dentry);
		}
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}

/*
 * Trim session cap count down to some max number.
 */
int ceph_trim_caps(struct ceph_mds_client *mdsc,
		   struct ceph_mds_session *session,
		   int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		int remaining = trim_caps;

		ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - remaining);
	}

	ceph_flush_cap_releases(mdsc, session);
	return 0;
}

static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		if (cf->tid <= want_flush_tid) {
			dout("check_caps_flush still flushing tid "
			     "%llu <= %llu\n", cf->tid, want_flush_tid);
			ret = 0;
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}

/*
 * flush all dirty inode data to disk.
 *
 * waits until we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	dout("check_caps_flush want %llu\n", want_flush_tid);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}

/*
 * called under s_mutex
 */
static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;
	__le32 barrier, *cap_barrier;

	down_read(&osdc->lock);
	barrier = cpu_to_le32(osdc->epoch_barrier);
	up_read(&osdc->lock);

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases, &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					   PAGE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);

			msg->hdr.version = cpu_to_le16(2);
			msg->hdr.compat_version = cpu_to_le16(1);
		}

		cap = list_first_entry(&tmp_list, struct ceph_cap,
				       session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
				   &head->num);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);

		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			// Append cap_barrier field
			cap_barrier = msg->front.iov_base + msg->front.iov_len;
			*cap_barrier = barrier;
			msg->front.iov_len += sizeof(*cap_barrier);

			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	if (msg) {
		// Append cap_barrier field
		cap_barrier = msg->front.iov_base + msg->front.iov_len;
		*cap_barrier = barrier;
		msg->front.iov_len += sizeof(*cap_barrier);

		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err("send_cap_releases mds%d, failed to allocate message\n",
	       session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}

static void ceph_cap_release_work(struct work_struct *work)
{
	struct ceph_mds_session *session =
		container_of(work, struct ceph_mds_session, s_cap_release_work);

	mutex_lock(&session->s_mutex);
	if (session->s_state == CEPH_MDS_SESSION_OPEN ||
	    session->s_state == CEPH_MDS_SESSION_HUNG)
		ceph_send_cap_releases(session->s_mdsc, session);
	mutex_unlock(&session->s_mutex);
	ceph_put_mds_session(session);
}

void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	if (mdsc->stopping)
		return;

	get_session(session);
	if (queue_work(mdsc->fsc->cap_wq,
		       &session->s_cap_release_work)) {
		dout("cap release work queued\n");
	} else {
		ceph_put_mds_session(session);
		dout("failed to queue cap release work\n");
	}
}

/*
 * caller holds session->s_cap_lock
 */
void __ceph_queue_cap_release(struct ceph_mds_session *session,
			      struct ceph_cap *cap)
{
	list_add_tail(&cap->session_caps, &session->s_cap_releases);
	session->s_num_cap_releases++;

	if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
		ceph_flush_cap_releases(session->s_mdsc, session);
}
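
/*
 * Releases are therefore batched: a flush is kicked off each time the
 * queue length crosses a CEPH_CAPS_PER_RELEASE boundary, and
 * ceph_send_cap_releases() above sends each CEPH_MSG_CLIENT_CAPRELEASE
 * message once it holds that many items, appending the OSD epoch
 * barrier to each one.
 */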

static void ceph_cap_reclaim_work(struct work_struct *work)
{
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, cap_reclaim_work);
	int ret = ceph_trim_dentries(mdsc);
	if (ret == -EAGAIN)
		ceph_queue_cap_reclaim_work(mdsc);
}

void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
{
	if (mdsc->stopping)
		return;

	if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
		dout("caps reclaim work queued\n");
	} else {
		dout("failed to queue caps reclaim work\n");
	}
}

void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
{
	int val;

	if (!nr)
		return;
	val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
	if (!(val % CEPH_CAPS_PER_RELEASE)) {
		atomic_set(&mdsc->cap_reclaim_pending, 0);
		ceph_queue_cap_reclaim_work(mdsc);
	}
}

int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(struct ceph_mds_reply_dir_entry);
	int order, num_entries;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
							     __GFP_NOWARN,
							     order);
		if (rinfo->dir_entries)
			break;
		order--;
	}
	if (!rinfo->dir_entries)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}
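
/*
 * Sizing example for the loop above: if the directory claims 1000
 * entries and each ceph_mds_reply_dir_entry were, say, 64 bytes, we
 * would ask get_order() for 64000 bytes and get order 4 (64 KiB with
 * 4 KiB pages); on allocation failure we retry with ever smaller
 * orders, then recompute how many entries actually fit in what we got.
 */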

/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
	struct timespec64 ts;

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	ktime_get_coarse_real_ts64(&ts);
	req->r_stamp = timespec64_trunc(ts, mdsc->fsc->sb->s_time_gran);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}

/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	return mdsc->oldest_tid;
}

/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int pos;
	unsigned seq;
	u64 base;

	if (!dentry)
		return ERR_PTR(-EINVAL);

	path = __getname();
	if (!path)
		return ERR_PTR(-ENOMEM);
retry:
	pos = PATH_MAX - 1;
	path[pos] = '\0';

	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	temp = dentry;
	for (;;) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode && dentry != temp &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			pos++; /* get rid of any prepended '/' */
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				spin_unlock(&temp->d_lock);
				break;
			}
			memcpy(path + pos, temp->d_name.name, temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		temp = READ_ONCE(temp->d_parent);

		/* Are we at the root? */
		if (IS_ROOT(temp))
			break;

		/* Are we out of buffer? */
		if (--pos < 0)
			break;

		path[pos] = '/';
	}
	base = ceph_ino(d_inode(temp));
	rcu_read_unlock();

	if (read_seqretry(&rename_lock, seq))
		goto retry;

	if (pos < 0) {
		/*
		 * A rename didn't occur, but somehow we didn't end up where
		 * we thought we would. Throw a warning and try again.
		 */
		pr_warn("build_path did not end path lookup where "
			"expected, pos is %d\n", pos);
		goto retry;
	}

	*pbase = base;
	*plen = PATH_MAX - 1 - pos;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, d_count(dentry), base, *plen, path + pos);
	return path + pos;
}
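
/*
 * Example walk: the loop writes names right-to-left into a PATH_MAX
 * buffer, so for the "bar" dentry under foo/.snap/bar the hidden
 * snapdir contributes no name, only its '/' separator, yielding the
 * "foo//bar" doubled-slash encoding described in the header comment,
 * with *pbase set to the ino where the walk stopped.
 */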

static int build_dentry_path(struct dentry *dentry, struct inode *dir,
			     const char **ppath, int *ppathlen, u64 *pino,
			     bool *pfreepath, bool parent_locked)
{
	char *path;

	rcu_read_lock();
	if (!dir)
		dir = d_inode_rcu(dentry->d_parent);
	if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
		*pino = ceph_ino(dir);
		rcu_read_unlock();
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	rcu_read_unlock();
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = true;
	return 0;
}

static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    bool *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = true;
	return 0;
}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 struct inode *rdiri, const char *rpath,
				 u64 rino, const char **ppath, int *pathlen,
				 u64 *ino, bool *freepath, bool parent_locked)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
				      freepath, parent_locked);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ? strlen(rpath) : 0;
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}

/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds, bool drop_cap_releases)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	bool freepath1 = false, freepath2 = false;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
			      req->r_parent, req->r_path1, req->r_ino1.ino,
			      &path1, &pathlen1, &ino1, &freepath1,
			      test_bit(CEPH_MDS_R_PARENT_LOCKED,
					&req->r_req_flags));
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	/* If r_old_dentry is set, then assume that its parent is locked */
	ret = set_request_path_attr(NULL, req->r_old_dentry,
			      req->r_old_dentry_dir,
			      req->r_path2, req->r_ino2.ino,
			      &path2, &pathlen2, &ino2, &freepath2, true);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
		sizeof(struct ceph_timespec);

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += pathlen1;
	if (req->r_old_dentry_drop)
		len += pathlen2;

	msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
		goto out_free2;
	}

	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.tid = cpu_to_le64(req->r_tid);

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
	head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
				req->r_parent, mds, req->r_dentry_drop,
				req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
				req->r_old_dentry_dir, mds,
				req->r_old_dentry_drop,
				req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      d_inode(req->r_old_dentry),
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);

	if (drop_cap_releases) {
		releases = 0;
		p = msg->front.iov_base + req->r_request_release_offset;
	}

	head->num_releases = cpu_to_le16(releases);

	/* time stamp */
	{
		struct ceph_timespec ts;
		ceph_encode_timespec64(&ts, &req->r_stamp);
		ceph_encode_copy(&p, &ts, sizeof(ts));
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	if (req->r_pagelist) {
		struct ceph_pagelist *pagelist = req->r_pagelist;
		ceph_msg_data_add_pagelist(msg, pagelist);
		msg->hdr.data_len = cpu_to_le32(pagelist->length);
	} else {
		msg->hdr.data_len = 0;
	}

	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		ceph_mdsc_free_path((char *)path2, pathlen2);
out_free1:
	if (freepath1)
		ceph_mdsc_free_path((char *)path1, pathlen1);
out:
	return msg;
}
/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	complete_all(&req->r_completion);
}
/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds, bool drop_cap_releases)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_attempts++;
	if (req->r_inode) {
		struct ceph_cap *cap =
			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

		if (cap)
			req->r_sent_on_mseq = cap->mseq;
		else
			req->r_sent_on_mseq = -1;
	}
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		void *p;
		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = msg->front.iov_base;

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;

		/* time stamp */
		p = msg->front.iov_base + req->r_request_release_offset;
		{
			struct ceph_timespec ts;
			ceph_encode_timespec64(&ts, &req->r_stamp);
			ceph_encode_copy(&p, &ts, sizeof(ts));
		}

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		return 0;
	}

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds, drop_cap_releases);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_parent)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;
	rhead->ino = 0;

	dout(" r_parent = %p\n", req->r_parent);
	return 0;
}
/*
 * send request, or put it on the appropriate wait list.
 */
static void __do_request(struct ceph_mds_client *mdsc,
			 struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = 0;

	if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
		if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
			__unregister_request(mdsc, req);
		return;
	}

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout("do_request forced umount\n");
		err = -EIO;
		goto finish;
	}
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
		if (mdsc->mdsmap_err) {
			err = mdsc->mdsmap_err;
			dout("do_request mdsmap err %d\n", err);
			goto finish;
		}
		if (mdsc->mdsmap->m_epoch == 0) {
			dout("do_request no mdsmap, waiting for map\n");
			list_add(&req->r_wait, &mdsc->waiting_for_map);
			return;
		}
		if (!(mdsc->fsc->mount_options->flags &
		      CEPH_MOUNT_OPT_MOUNTWAIT) &&
		    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
			err = -EHOSTUNREACH;
			pr_info("probably no mds server is up\n");
			goto finish;
		}
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		return;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = get_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     ceph_session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
			err = -EACCES;
			goto out_session;
		}
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds, false);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

out_session:
	ceph_put_mds_session(session);
finish:
	if (err) {
		dout("__do_request early error %d\n", err);
		req->r_err = err;
		complete_request(mdsc, req);
		__unregister_request(mdsc, req);
	}
	return;
}
/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req;
	LIST_HEAD(tmp_list);

	list_splice_init(head, &tmp_list);

	while (!list_empty(&tmp_list)) {
		req = list_entry(tmp_list.next,
				 struct ceph_mds_request, r_wait);
		list_del_init(&req->r_wait);
		dout(" wake request %p tid %llu\n", req, req->r_tid);
		__do_request(mdsc, req);
	}
}
/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
	struct ceph_mds_request *req;
	struct rb_node *p = rb_first(&mdsc->request_tree);

	dout("kick_requests mds%d\n", mds);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
			continue;
		if (req->r_attempts > 0)
			continue; /* only new requests */
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			list_del_init(&req->r_wait);
			__do_request(mdsc, req);
		}
	}
}
int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
			     struct ceph_mds_request *req)
{
	int err;

	/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_parent)
		ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	dout("submit_request on %p for inode %p\n", req, dir);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);
	err = req->r_err;
	mutex_unlock(&mdsc->mutex);
	return err;
}
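/*
 * Wait for a submitted request to complete, time out, or be killed,
 * then pick up the result under mdsc->mutex.  An interrupted wait
 * aborts the request unless a real reply already raced in.
 */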
static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req)
{
	int err;

	/* wait */
	dout("do_request waiting\n");
	if (!req->r_timeout && req->r_wait_for_completion) {
		err = req->r_wait_for_completion(mdsc, req);
	} else {
		long timeleft = wait_for_completion_killable_timeout(
					&req->r_completion,
					ceph_timeout_jiffies(req->r_timeout));
		if (timeleft > 0)
			err = 0;
		else if (!timeleft)
			err = -EIO;  /* timed out */
		else
			err = timeleft;  /* killed */
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * ensure we aren't running concurrently with
		 * ceph_fill_trace or ceph_readdir_prepopulate, which
		 * rely on locks (dir mutex) held by our caller.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
		mutex_unlock(&req->r_fill_mutex);

		if (req->r_parent &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	} else {
		err = req->r_err;
	}

	mutex_unlock(&mdsc->mutex);
	return err;
}
/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* issue */
	err = ceph_mdsc_submit_request(mdsc, dir, req);
	if (!err)
		err = ceph_mdsc_wait_request(mdsc, req);
	dout("do_request %p done, result %d\n", req, err);
	return err;
}
/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *dir = req->r_parent;
	struct inode *old_dir = req->r_old_dentry_dir;

	dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);

	ceph_dir_clear_complete(dir);
	if (old_dir)
		ceph_dir_clear_complete(old_dir);
	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}
/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	struct ceph_snap_realm *realm;
	u64 tid;
	int err, result;
	int mds = session->s_mds;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	/* dup? */
	if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
	    (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
		pr_warn("got a dup %s reply on %llu from mds%d\n",
			head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
		pr_warn("got unsafe after safe on %llu from mds%d\n",
			tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	/*
	 * Handle an ESTALE:
	 * if we're not talking to the authority, send to them
	 * if the authority has changed while we weren't looking,
	 * send to new authority
	 * Otherwise we just have to return an ESTALE
	 */
	if (result == -ESTALE) {
		dout("got ESTALE on request %llu\n", req->r_tid);
		req->r_resend_mds = -1;
		if (req->r_direct_mode != USE_AUTH_MDS) {
			dout("not using auth, setting for that now\n");
			req->r_direct_mode = USE_AUTH_MDS;
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
			goto out;
		} else {
			int mds = __choose_mds(mdsc, req);
			if (mds >= 0 && mds != req->r_session->s_mds) {
				dout("but auth changed, so resending\n");
				__do_request(mdsc, req);
				mutex_unlock(&mdsc->mutex);
				goto out;
			}
		}
		dout("have to return ESTALE on request %llu\n", req->r_tid);
	}

	if (head->safe) {
		set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
		__unregister_request(mdsc, req);

		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_req(mdsc))
				complete_all(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
		if (req->r_unsafe_dir) {
			struct ceph_inode_info *ci =
					ceph_inode(req->r_unsafe_dir);
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_dir_item,
				      &ci->i_unsafe_dirops);
			spin_unlock(&ci->i_unsafe_lock);
		}
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
		err = parse_reply_info(msg, rinfo, (u64)-1);
	else
		err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
		goto out_err;
	}

	/* snap trace */
	realm = NULL;
	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
				rinfo->snapblob + rinfo->snapblob_len,
				le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
				&realm);
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	mutex_lock(&req->r_fill_mutex);
	current->journal_info = req;
	err = ceph_fill_trace(mdsc->fsc->sb, req);
	if (err == 0) {
		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
				    req->r_op == CEPH_MDS_OP_LSSNAP))
			ceph_readdir_prepopulate(req, req->r_session);
	}
	current->journal_info = NULL;
	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);
	if (realm)
		ceph_put_snap_realm(mdsc, realm);

	if (err == 0) {
		if (req->r_target_inode &&
		    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
			struct ceph_inode_info *ci =
				ceph_inode(req->r_target_inode);
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_target_item,
				      &ci->i_unsafe_iops);
			spin_unlock(&ci->i_unsafe_lock);
		}

		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
	}
out_err:
	mutex_lock(&mdsc->mutex);
	if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		if (err) {
			req->r_err = err;
		} else {
			req->r_reply = ceph_msg_get(msg);
			set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
		}
	} else {
		dout("reply arrived after request %lld was aborted\n", tid);
	}
	mutex_unlock(&mdsc->mutex);

	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
out:
	ceph_mdsc_put_request(req);
	return;
}
/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 next_mds;
	u32 fwd_seq;
	int err = -EINVAL;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		goto out;  /* dup reply? */
	}

	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_err);
		BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
		req->r_attempts = 0;
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
}
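/*
 * Decode the optional client metadata map (string -> string) from a
 * session message, noting whether the server's error_string says we
 * were blacklisted.
 */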
static int __decode_session_metadata(void **p, void *end,
				     bool *blacklisted)
{
	/* map<string,string> */
	u32 n;
	bool err_str;

	ceph_decode_32_safe(p, end, n, bad);
	while (n-- > 0) {
		u32 len;
		ceph_decode_32_safe(p, end, len, bad);
		ceph_decode_need(p, end, len, bad);
		err_str = !strncmp(*p, "error_string", len);
		*p += len;
		ceph_decode_32_safe(p, end, len, bad);
		ceph_decode_need(p, end, len, bad);
		if (err_str && strnstr(*p, "blacklisted", len))
			*blacklisted = true;
		*p += len;
	}
	return 0;
bad:
	return -1;
}
/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	int mds = session->s_mds;
	int msg_version = le16_to_cpu(msg->hdr.version);
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mds_session_head *h;
	u32 op;
	u64 seq;
	unsigned long features = 0;
	int wake = 0;
	bool blacklisted = false;

	/* decode */
	ceph_decode_need(&p, end, sizeof(*h), bad);
	h = p;
	p += sizeof(*h);

	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	if (msg_version >= 3) {
		u32 len;
		/* version >= 2, metadata */
		if (__decode_session_metadata(&p, end, &blacklisted) < 0)
			goto bad;
		/* version >= 3, feature bits */
		ceph_decode_32_safe(&p, end, len, bad);
		ceph_decode_need(&p, end, len, bad);
		memcpy(&features, p, min_t(size_t, len, sizeof(features)));
		p += len;
	}

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE) {
		get_session(session);
		__unregister_session(mdsc, session);
	}
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     ceph_session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_OPEN;
		session->s_features = features;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_gen_ttl_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = jiffies - 1;
		spin_unlock(&session->s_gen_ttl_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	case CEPH_SESSION_FLUSHMSG:
		send_flushmsg_ack(mdsc, session, seq);
		break;

	case CEPH_SESSION_FORCE_RO:
		dout("force_session_readonly %p\n", session);
		spin_lock(&session->s_cap_lock);
		session->s_readonly = true;
		spin_unlock(&session->s_cap_lock);
		wake_up_session_caps(session, FORCE_RO);
		break;

	case CEPH_SESSION_REJECT:
		WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
		pr_info("mds%d rejected session\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_REJECTED;
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		if (blacklisted)
			mdsc->fsc->blacklisted = true;
		wake = 2; /* for good measure */
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		if (wake == 2)
			kick_requests(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
	}
	if (op == CEPH_SESSION_CLOSE)
		ceph_put_mds_session(session);
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	return;
}
/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	struct rb_node *p;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds, true);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}

	/*
	 * also re-send old requests when MDS enters reconnect stage. So that MDS
	 * can process completed request in clientreplay stage.
	 */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
			continue;
		if (req->r_attempts == 0)
			continue; /* only old requests */
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds) {
			err = __prepare_send_request(mdsc, req,
						     session->s_mds, true);
			if (!err) {
				ceph_msg_get(req->r_request);
				ceph_con_send(&session->s_con, req->r_request);
			}
		}
	}
	mutex_unlock(&mdsc->mutex);
}
static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
{
	struct ceph_msg *reply;
	struct ceph_pagelist *_pagelist;
	struct page *page;
	__le32 *addr;
	int err = -ENOMEM;

	if (!recon_state->allow_multi)
		return -ENOSPC;

	/* can't handle message that contains both caps and realm */
	BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);

	/* pre-allocate new pagelist */
	_pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!_pagelist)
		return -ENOMEM;

	reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
	if (!reply)
		goto fail_msg;

	/* placeholder for nr_caps */
	err = ceph_pagelist_encode_32(_pagelist, 0);
	if (err < 0)
		goto fail;

	if (recon_state->nr_caps) {
		/* currently encoding caps */
		err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
		if (err)
			goto fail;
	} else {
		/* placeholder for nr_realms (currently encoding realms) */
		err = ceph_pagelist_encode_32(_pagelist, 0);
		if (err < 0)
			goto fail;
	}

	err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
	if (err)
		goto fail;

	page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
	addr = kmap_atomic(page);
	if (recon_state->nr_caps) {
		/* currently encoding caps */
		*addr = cpu_to_le32(recon_state->nr_caps);
	} else {
		/* currently encoding realms */
		*(addr + 1) = cpu_to_le32(recon_state->nr_realms);
	}
	kunmap_atomic(addr);

	reply->hdr.version = cpu_to_le16(5);
	reply->hdr.compat_version = cpu_to_le16(4);

	reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
	ceph_msg_data_add_pagelist(reply, recon_state->pagelist);

	ceph_con_send(&recon_state->session->s_con, reply);
	ceph_pagelist_release(recon_state->pagelist);

	recon_state->pagelist = _pagelist;
	recon_state->nr_caps = 0;
	recon_state->nr_realms = 0;
	recon_state->msg_version = 5;
	return 0;
fail:
	ceph_msg_put(reply);
fail_msg:
	ceph_pagelist_release(_pagelist);
	return err;
}
/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	int err;
	u64 snap_follows;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	cap->mseq = 0;       /* and migrate_seq */
	cap->cap_gen = cap->session->s_cap_gen;

	if (recon_state->msg_version >= 2) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = 0;
		rec.v2.flock_len = (__force __le32)
			((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = 0;
	}

	if (list_empty(&ci->i_cap_snaps)) {
		snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
	} else {
		struct ceph_cap_snap *capsnap =
			list_first_entry(&ci->i_cap_snaps,
					 struct ceph_cap_snap, ci_item);
		snap_follows = capsnap->follows;
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->msg_version >= 2) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_filelock *flocks = NULL;
		size_t struct_len, total_len = sizeof(u64);
		u8 struct_v = 0;

encode_again:
		if (rec.v2.flock_len) {
			ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		} else {
			num_fcntl_locks = 0;
			num_flock_locks = 0;
		}
		if (num_fcntl_locks + num_flock_locks > 0) {
			flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
					       sizeof(struct ceph_filelock),
					       GFP_NOFS);
			if (!flocks) {
				err = -ENOMEM;
				goto out_err;
			}
			err = ceph_encode_locks_to_buffer(inode, flocks,
							  num_fcntl_locks,
							  num_flock_locks);
			if (err) {
				kfree(flocks);
				flocks = NULL;
				if (err == -ENOSPC)
					goto encode_again;
				goto out_err;
			}
		} else {
			kfree(flocks);
			flocks = NULL;
		}

		if (recon_state->msg_version >= 3) {
			/* version, compat_version and struct_len */
			total_len += 2 * sizeof(u8) + sizeof(u32);
			struct_v = 2;
		}
		/*
		 * number of encoded locks is stable, so copy to pagelist
		 */
		struct_len = 2 * sizeof(u32) +
			    (num_fcntl_locks + num_flock_locks) *
			    sizeof(struct ceph_filelock);
		rec.v2.flock_len = cpu_to_le32(struct_len);

		struct_len += sizeof(u32) + sizeof(rec.v2);

		if (struct_v >= 2)
			struct_len += sizeof(u64); /* snap_follows */

		total_len += struct_len;

		if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
			err = send_reconnect_partial(recon_state);
			if (err)
				goto out_freeflocks;
			pagelist = recon_state->pagelist;
		}

		err = ceph_pagelist_reserve(pagelist, total_len);
		if (err)
			goto out_freeflocks;

		ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
		if (recon_state->msg_version >= 3) {
			ceph_pagelist_encode_8(pagelist, struct_v);
			ceph_pagelist_encode_8(pagelist, 1);
			ceph_pagelist_encode_32(pagelist, struct_len);
		}
		ceph_pagelist_encode_string(pagelist, NULL, 0);
		ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
		ceph_locks_to_pagelist(flocks, pagelist,
				       num_fcntl_locks, num_flock_locks);
		if (struct_v >= 2)
			ceph_pagelist_encode_64(pagelist, snap_follows);
out_freeflocks:
		kfree(flocks);
	} else {
		u64 pathbase = 0;
		int pathlen = 0;
		char *path = NULL;
		struct dentry *dentry;

		dentry = d_find_alias(inode);
		if (dentry) {
			path = ceph_mdsc_build_path(dentry,
						    &pathlen, &pathbase, 0);
			dput(dentry);
			if (IS_ERR(path)) {
				err = PTR_ERR(path);
				goto out_err;
			}
			rec.v1.pathbase = cpu_to_le64(pathbase);
		}

		err = ceph_pagelist_reserve(pagelist,
					    sizeof(u64) + sizeof(u32) +
					    pathlen + sizeof(rec.v1));
		if (err)
			goto out_freepath;

		ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
		ceph_pagelist_encode_string(pagelist, path, pathlen);
		ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
out_freepath:
		ceph_mdsc_free_path(path, pathlen);
	}

out_err:
	if (err >= 0)
		recon_state->nr_caps++;
	return err;
}
static int encode_snap_realms(struct ceph_mds_client *mdsc,
			      struct ceph_reconnect_state *recon_state)
{
	struct rb_node *p;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	int err = 0;

	if (recon_state->msg_version >= 4) {
		err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
		if (err < 0)
			goto fail;
	}

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
		       rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		if (recon_state->msg_version >= 4) {
			size_t need = sizeof(u8) * 2 + sizeof(u32) +
				      sizeof(sr_rec);

			if (pagelist->length + need > RECONNECT_MAX_SIZE) {
				err = send_reconnect_partial(recon_state);
				if (err)
					goto fail;
				pagelist = recon_state->pagelist;
			}

			err = ceph_pagelist_reserve(pagelist, need);
			if (err)
				goto fail;

			ceph_pagelist_encode_8(pagelist, 1);
			ceph_pagelist_encode_8(pagelist, 1);
			ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
		}

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);

		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;

		recon_state->nr_realms++;
	}
fail:
	return err;
}
/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	int mds = session->s_mds;
	int err = -ENOMEM;
	struct ceph_reconnect_state recon_state = {
		.session = session,
	};
	LIST_HEAD(dispose);

	pr_info("mds%d reconnect start\n", mds);

	recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!recon_state.pagelist)
		goto fail_nopagelist;

	reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;
	session->s_seq = 0;

	dout("session %p state %s\n", session,
	     ceph_session_state_name(session->s_state));

	spin_lock(&session->s_gen_ttl_lock);
	session->s_cap_gen++;
	spin_unlock(&session->s_gen_ttl_lock);

	spin_lock(&session->s_cap_lock);
	/* don't know if session is readonly */
	session->s_readonly = 0;
	/*
	 * notify __ceph_remove_cap() that we are composing cap reconnect.
	 * If a cap get released before being added to the cap reconnect,
	 * __ceph_remove_cap() should skip queuing cap release.
	 */
	session->s_cap_reconnect = 1;
	/* drop old cap expires; we're about to reestablish that state */
	detach_cap_releases(session, &dispose);
	spin_unlock(&session->s_cap_lock);
	dispose_cap_releases(mdsc, &dispose);

	/* trim unused caps to reduce MDS's cache rejoin time */
	if (mdsc->fsc->sb->s_root)
		shrink_dcache_parent(mdsc->fsc->sb->s_root);

	ceph_con_close(&session->s_con);
	ceph_con_open(&session->s_con,
		      CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	ceph_early_kick_flushing_caps(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	/* placeholder for nr_caps */
	err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
	if (err)
		goto fail;

	if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
		recon_state.msg_version = 3;
		recon_state.allow_multi = true;
	} else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
		recon_state.msg_version = 3;
	} else {
		recon_state.msg_version = 2;
	}
	/* traverse this session's caps */
	err = ceph_iterate_session_caps(session, encode_caps_cb, &recon_state);

	spin_lock(&session->s_cap_lock);
	session->s_cap_reconnect = 0;
	spin_unlock(&session->s_cap_lock);

	if (err < 0)
		goto fail;

	/* check if all realms can be encoded into current message */
	if (mdsc->num_snap_realms) {
		size_t total_len =
			recon_state.pagelist->length +
			mdsc->num_snap_realms *
			sizeof(struct ceph_mds_snaprealm_reconnect);
		if (recon_state.msg_version >= 4) {
			/* number of realms */
			total_len += sizeof(u32);
			/* version, compat_version and struct_len */
			total_len += mdsc->num_snap_realms *
				     (2 * sizeof(u8) + sizeof(u32));
		}
		if (total_len > RECONNECT_MAX_SIZE) {
			if (!recon_state.allow_multi) {
				err = -ENOSPC;
				goto fail;
			}
			if (recon_state.nr_caps) {
				err = send_reconnect_partial(&recon_state);
				if (err)
					goto fail;
			}
			recon_state.msg_version = 5;
		}
	}

	err = encode_snap_realms(mdsc, &recon_state);
	if (err < 0)
		goto fail;

	if (recon_state.msg_version >= 5) {
		err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
		if (err < 0)
			goto fail;
	}

	if (recon_state.nr_caps || recon_state.nr_realms) {
		struct page *page =
			list_first_entry(&recon_state.pagelist->head,
					 struct page, lru);
		__le32 *addr = kmap_atomic(page);
		if (recon_state.nr_caps) {
			WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
			*addr = cpu_to_le32(recon_state.nr_caps);
		} else if (recon_state.msg_version >= 4) {
			*(addr + 1) = cpu_to_le32(recon_state.nr_realms);
		}
		kunmap_atomic(addr);
	}

	reply->hdr.version = cpu_to_le16(recon_state.msg_version);
	if (recon_state.msg_version >= 4)
		reply->hdr.compat_version = cpu_to_le16(4);

	reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
	ceph_msg_data_add_pagelist(reply, recon_state.pagelist);

	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	ceph_pagelist_release(recon_state.pagelist);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(recon_state.pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}
/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{
	int i;
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) {
		if (!mdsc->sessions[i])
			continue;
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
		     ceph_session_state_name(s->s_state));

		if (i >= newmap->m_num_mds) {
			/* force close session for stopped mds */
			get_session(s);
			__unregister_session(mdsc, s);
			__wake_requests(mdsc, &s->s_waiting);
			mutex_unlock(&mdsc->mutex);

			mutex_lock(&s->s_mutex);
			cleanup_session_requests(mdsc, s);
			remove_session_caps(s);
			mutex_unlock(&s->s_mutex);

			ceph_put_mds_session(s);

			mutex_lock(&mdsc->mutex);
			kick_requests(mdsc, i);
			continue;
		}

		if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			/* just close it */
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&s->s_mutex);
			mutex_lock(&mdsc->mutex);
			ceph_con_close(&s->s_con);
			mutex_unlock(&s->s_mutex);
			s->s_state = CEPH_MDS_SESSION_RESTARTING;
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */
		}

		/*
		 * send reconnect?
		 */
		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT) {
			mutex_unlock(&mdsc->mutex);
			send_mds_reconnect(mdsc, s);
			mutex_lock(&mdsc->mutex);
		}

		/*
		 * kick request on any mds that has gone active.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			if (oldstate != CEPH_MDS_STATE_CREATING &&
			    oldstate != CEPH_MDS_STATE_STARTING)
				pr_info("mds%d recovery completed\n", s->s_mds);
			kick_requests(mdsc, i);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, RECONNECT);
		}
	}

	for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) {
		s = mdsc->sessions[i];
		if (!s)
			continue;
		if (!ceph_mdsmap_is_laggy(newmap, i))
			continue;
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG ||
		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout(" connecting to export targets of laggy mds%d\n",
			     i);
			__open_export_target_sessions(mdsc, s);
		}
	}
}
/*
 * leases
 */

/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}
static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	dname.len = get_unaligned_le32(h + 1);
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
		goto bad;
	dname.name = (void *)(h + 1) + sizeof(u32);

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), vino.ino, inode,
	     dname.len, dname.name);

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	if (!inode) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(parent, dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				msecs_to_jiffies(le32_to_cpu(h->duration_ms));

			di->lease_seq = seq;
			di->time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	mutex_unlock(&session->s_mutex);
	/* avoid calling iput_final() in mds dispatch threads */
	ceph_async_iput(inode);
	return;

bad:
	pr_err("corrupt lease message\n");
}
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	struct inode *dir;
	int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;

	dout("lease_send_msg dentry %p %s to mds%d\n",
	     dentry, ceph_lease_op_name(action), session->s_mds);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->seq = cpu_to_le32(seq);

	spin_lock(&dentry->d_lock);
	dir = d_inode(dentry->d_parent);
	lease->ino = cpu_to_le64(ceph_ino(dir));
	lease->first = lease->last = cpu_to_le64(ceph_snap(dir));

	put_unaligned_le32(dentry->d_name.len, lease + 1);
	memcpy((void *)(lease + 1) + 4,
	       dentry->d_name.name, dentry->d_name.len);
	spin_unlock(&dentry->d_lock);
	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}
/*
 * lock and then unlock each session, to wait for any in-flight
 * session activity to finish
 */
static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
{
	int i;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}
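/*
 * If we were blacklisted and the CLEANRECOVER mount option is set, try
 * an automatic reconnect, rate-limited to once every 30 minutes.
 */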
static void maybe_recover_session(struct ceph_mds_client *mdsc)
{
	struct ceph_fs_client *fsc = mdsc->fsc;

	if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
		return;

	if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
		return;

	if (!READ_ONCE(fsc->blacklisted))
		return;

	if (fsc->last_auto_reconnect &&
	    time_before(jiffies, fsc->last_auto_reconnect + HZ * 60 * 30))
		return;

	pr_info("auto reconnect after blacklisted\n");
	fsc->last_auto_reconnect = jiffies;
	ceph_force_reconnect(fsc->sb);
}
/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);
	schedule_delayed_work(&mdsc->delayed_work, hz);
}

static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state == CEPH_MDS_SESSION_NEW ||
		    s->s_state == CEPH_MDS_SESSION_RESTARTING ||
		    s->s_state == CEPH_MDS_SESSION_REJECTED) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	ceph_check_delayed_caps(mdsc);

	ceph_queue_cap_reclaim_work(mdsc);

	ceph_trim_snapid_map(mdsc);

	maybe_recover_session(mdsc);

	schedule_delayed(mdsc);
}
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (!mdsc->mdsmap) {
		kfree(mdsc);
		return -ENOMEM;
	}

	fsc->mdsc = mdsc;
	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	atomic_set(&mdsc->num_sessions, 0);
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	atomic64_set(&mdsc->quotarealms_count, 0);
	mdsc->quotarealms_inodes = RB_ROOT;
	mutex_init(&mdsc->quotarealms_inodes_mutex);
	mdsc->last_snap_seq = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	mdsc->num_snap_realms = 0;
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->oldest_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->last_cap_flush_tid = 1;
	INIT_LIST_HEAD(&mdsc->cap_flush_list);
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
	atomic_set(&mdsc->cap_reclaim_pending, 0);

	spin_lock_init(&mdsc->dentry_list_lock);
	INIT_LIST_HEAD(&mdsc->dentry_leases);
	INIT_LIST_HEAD(&mdsc->dentry_dir_leases);

	ceph_caps_init(mdsc);
	ceph_adjust_caps_max_min(mdsc, fsc->mount_options);

	spin_lock_init(&mdsc->snapid_map_lock);
	mdsc->snapid_map_tree = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snapid_map_lru);

	init_rwsem(&mdsc->pool_perm_rwsem);
	mdsc->pool_perm_tree = RB_ROOT;

	strscpy(mdsc->nodename, utsname()->nodename,
		sizeof(mdsc->nodename));
	return 0;
}
/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_request *req;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    ceph_timeout_jiffies(opts->mount_timeout));

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			list_del_init(&req->r_wait);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}
/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	lock_unlock_sessions(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();

	ceph_cleanup_quotarealms_inodes(mdsc);
}
/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
		    (req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush;

	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	mutex_unlock(&mdsc->mutex);

	ceph_flush_dirty_caps(mdsc);
	spin_lock(&mdsc->cap_dirty_lock);
	want_flush = mdsc->last_cap_flush_tid;
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_last_entry(&mdsc->cap_flush_list,
					struct ceph_cap_flush, g_list);
		cf->wake = true;
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	dout("sync want tid %lld flush_seq %lld\n",
	     want_tid, want_flush);

	wait_unsafe_requests(mdsc, want_tid);
	wait_caps_flush(mdsc, want_flush);
}
/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
{
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return true;
	return atomic_read(&mdsc->num_sessions) <= skipped;
}
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_session *session;
	int i;
	int skipped = 0;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		if (__close_session(mdsc, session) <= 0)
			skipped++;
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq,
			   done_closing_sessions(mdsc, skipped),
			   ceph_timeout_jiffies(opts->mount_timeout));

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_snapid_map(mdsc);
	ceph_cleanup_empty_realms(mdsc);

	cancel_work_sync(&mdsc->cap_reclaim_work);
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}
void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int mds;

	dout("force umount\n");

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; mds++) {
		session = __ceph_lookup_mds_session(mdsc, mds);
		if (!session)
			continue;

		if (session->s_state == CEPH_MDS_SESSION_REJECTED)
			__unregister_session(mdsc, session);
		__wake_requests(mdsc, &session->s_waiting);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
			cleanup_session_requests(mdsc, session);
			remove_session_caps(session);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		mutex_lock(&mdsc->mutex);
		kick_requests(mdsc, mds);
	}
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
}
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
	ceph_pool_perm_destroy(mdsc);
}
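/*
 * Tear the mds client down for good; the messenger is flushed first so
 * that no connection work still references mdsc when it is freed.
 */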
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;
	dout("mdsc_destroy %p\n", mdsc);

	if (!mdsc)
		return;

	/* flush out any connection work with references to us */
	ceph_msgr_flush();

	ceph_mdsc_stop(mdsc);

	fsc->mdsc = NULL;
	kfree(mdsc);
	dout("mdsc_destroy %p done\n", mdsc);
}
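/*
 * Decode an FSMap message, find the fscid matching our mds_namespace
 * mount option, and subscribe to the corresponding MDS map.
 */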
void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	struct ceph_fs_client *fsc = mdsc->fsc;
	const char *mds_namespace = fsc->mount_options->mds_namespace;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	u32 epoch;
	u32 map_len;
	u32 num_fs;
	u32 mount_fscid = (u32)-1;
	u8 struct_v, struct_cv;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(u32), bad);
	epoch = ceph_decode_32(&p);

	dout("handle_fsmap epoch %u\n", epoch);

	ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
	struct_v = ceph_decode_8(&p);
	struct_cv = ceph_decode_8(&p);
	map_len = ceph_decode_32(&p);

	ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
	p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */

	num_fs = ceph_decode_32(&p);
	while (num_fs-- > 0) {
		void *info_p, *info_end;
		u32 info_len;
		u8 info_v, info_cv;
		u32 fscid, namelen;

		ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
		info_v = ceph_decode_8(&p);
		info_cv = ceph_decode_8(&p);
		info_len = ceph_decode_32(&p);
		ceph_decode_need(&p, end, info_len, bad);
		info_p = p;
		info_end = p + info_len;
		p = info_end;

		ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
		fscid = ceph_decode_32(&info_p);
		namelen = ceph_decode_32(&info_p);
		ceph_decode_need(&info_p, info_end, namelen, bad);

		if (mds_namespace &&
		    strlen(mds_namespace) == namelen &&
		    !strncmp(mds_namespace, (char *)info_p, namelen)) {
			mount_fscid = fscid;
			break;
		}
	}

	ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
	if (mount_fscid != (u32)-1) {
		fsc->client->monc.fs_cluster_id = mount_fscid;
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
				   0, true);
		ceph_monc_renew_subs(&fsc->client->monc);
	} else {
		err = -ENOENT;
		goto err_out;
	}
	return;

bad:
	pr_err("error decoding fsmap\n");
err_out:
	mutex_lock(&mdsc->mutex);
	mdsc->mdsmap_err = err;
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
}
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
				       MAX_LFS_FILESIZE);

	__wake_requests(mdsc, &mdsc->waiting_for_map);
	ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
			  mdsc->mdsmap->m_epoch);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1);
	ceph_put_mds_session(s);
}
/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warn("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_mdsmap(mdsc, msg);
		break;
	case CEPH_MSG_FS_MAP_USER:
		ceph_mdsc_handle_fsmap(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_QUOTA:
		ceph_handle_quota(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
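/*
 * Every message type the MDS can send lands here.  The registration
 * check up front guards against messages racing with session teardown:
 * if the session is no longer registered, the message is dropped (but
 * still put) rather than dispatched against a dying session.
 */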
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int add_authorizer_challenge(struct ceph_connection *con,
				    void *challenge_buf, int challenge_buf_len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
					    challenge_buf, challenge_buf_len);
}
static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
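/*
 * These three hooks all resolve the same chain
 * (session -> mdsc -> fs client -> monitor client -> auth client) and
 * then defer to the generic auth layer; invalidate_authorizer
 * additionally asks the monitor client to fetch fresh keys.
 */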
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

	if (con->in_msg)
		return con->in_msg;

	*skip = 0;
	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}
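/*
 * mds_alloc_msg provides the receive buffer for an incoming frame: if
 * the messenger already attached a message to the connection
 * (con->in_msg) it is reused, otherwise a fresh message sized for the
 * advertised front section is allocated.  Returning NULL while leaving
 * *skip at 0 signals an allocation failure to the messenger rather
 * than a message we chose to ignore.
 */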
static int mds_sign_message(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_sign_message(auth, msg);
}
static int mds_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_check_message_signature(auth, msg);
}
static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.add_authorizer_challenge = add_authorizer_challenge,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
	.alloc_msg = mds_alloc_msg,
	.sign_message = mds_sign_message,
	.check_message_signature = mds_check_message_signature,
};
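/*
 * A minimal usage sketch (hedged: the actual call site is
 * register_session(), earlier in this file).  Each session embeds a
 * ceph_connection that is bound to this ops table, with the session
 * itself as the private pointer that every callback above retrieves:
 *
 *	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
 *
 * From then on the messenger drives the session: get/put for lifetime,
 * dispatch for incoming messages, and the auth hooks during connection
 * (re)negotiation.
 */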