1 #include <linux/ceph/ceph_debug.h>
4 #include <linux/wait.h>
5 #include <linux/slab.h>
6 #include <linux/sched.h>
7 #include <linux/debugfs.h>
8 #include <linux/seq_file.h>
11 #include "mds_client.h"
13 #include <linux/ceph/messenger.h>
14 #include <linux/ceph/decode.h>
15 #include <linux/ceph/pagelist.h>
16 #include <linux/ceph/auth.h>
17 #include <linux/ceph/debugfs.h>
20 * A cluster of MDS (metadata server) daemons is responsible for
21 * managing the file system namespace (the directory hierarchy and
22 * inodes) and for coordinating shared access to storage. Metadata is
23 * partitioning hierarchically across a number of servers, and that
24 * partition varies over time as the cluster adjusts the distribution
25 * in order to balance load.
27 * The MDS client is primarily responsible to managing synchronous
28 * metadata requests for operations like open, unlink, and so forth.
29 * If there is a MDS failure, we find out about it when we (possibly
30 * request and) receive a new MDS map, and can resubmit affected
33 * For the most part, though, we take advantage of a lossless
34 * communications channel to the MDS, and do not need to worry about
35 * timing out or resubmitting requests.
37 * We maintain a stateful "session" with each MDS we interact with.
38 * Within each session, we sent periodic heartbeat messages to ensure
39 * any capabilities or leases we have been issues remain valid. If
40 * the session times out and goes stale, our leases and capabilities
41 * are no longer valid.
44 struct ceph_reconnect_state
{
45 struct ceph_pagelist
*pagelist
;
49 static void __wake_requests(struct ceph_mds_client
*mdsc
,
50 struct list_head
*head
);
52 static const struct ceph_connection_operations mds_con_ops
;
60 * parse individual inode info
62 static int parse_reply_info_in(void **p
, void *end
,
63 struct ceph_mds_reply_info_in
*info
)
68 *p
+= sizeof(struct ceph_mds_reply_inode
) +
69 sizeof(*info
->in
->fragtree
.splits
) *
70 le32_to_cpu(info
->in
->fragtree
.nsplits
);
72 ceph_decode_32_safe(p
, end
, info
->symlink_len
, bad
);
73 ceph_decode_need(p
, end
, info
->symlink_len
, bad
);
75 *p
+= info
->symlink_len
;
77 ceph_decode_32_safe(p
, end
, info
->xattr_len
, bad
);
78 ceph_decode_need(p
, end
, info
->xattr_len
, bad
);
79 info
->xattr_data
= *p
;
80 *p
+= info
->xattr_len
;
87 * parse a normal reply, which may contain a (dir+)dentry and/or a
90 static int parse_reply_info_trace(void **p
, void *end
,
91 struct ceph_mds_reply_info_parsed
*info
)
95 if (info
->head
->is_dentry
) {
96 err
= parse_reply_info_in(p
, end
, &info
->diri
);
100 if (unlikely(*p
+ sizeof(*info
->dirfrag
) > end
))
103 *p
+= sizeof(*info
->dirfrag
) +
104 sizeof(u32
)*le32_to_cpu(info
->dirfrag
->ndist
);
105 if (unlikely(*p
> end
))
108 ceph_decode_32_safe(p
, end
, info
->dname_len
, bad
);
109 ceph_decode_need(p
, end
, info
->dname_len
, bad
);
111 *p
+= info
->dname_len
;
113 *p
+= sizeof(*info
->dlease
);
116 if (info
->head
->is_target
) {
117 err
= parse_reply_info_in(p
, end
, &info
->targeti
);
122 if (unlikely(*p
!= end
))
129 pr_err("problem parsing mds trace %d\n", err
);
134 * parse readdir results
136 static int parse_reply_info_dir(void **p
, void *end
,
137 struct ceph_mds_reply_info_parsed
*info
)
143 if (*p
+ sizeof(*info
->dir_dir
) > end
)
145 *p
+= sizeof(*info
->dir_dir
) +
146 sizeof(u32
)*le32_to_cpu(info
->dir_dir
->ndist
);
150 ceph_decode_need(p
, end
, sizeof(num
) + 2, bad
);
151 num
= ceph_decode_32(p
);
152 info
->dir_end
= ceph_decode_8(p
);
153 info
->dir_complete
= ceph_decode_8(p
);
157 /* alloc large array */
159 info
->dir_in
= kcalloc(num
, sizeof(*info
->dir_in
) +
160 sizeof(*info
->dir_dname
) +
161 sizeof(*info
->dir_dname_len
) +
162 sizeof(*info
->dir_dlease
),
164 if (info
->dir_in
== NULL
) {
168 info
->dir_dname
= (void *)(info
->dir_in
+ num
);
169 info
->dir_dname_len
= (void *)(info
->dir_dname
+ num
);
170 info
->dir_dlease
= (void *)(info
->dir_dname_len
+ num
);
174 ceph_decode_need(p
, end
, sizeof(u32
)*2, bad
);
175 info
->dir_dname_len
[i
] = ceph_decode_32(p
);
176 ceph_decode_need(p
, end
, info
->dir_dname_len
[i
], bad
);
177 info
->dir_dname
[i
] = *p
;
178 *p
+= info
->dir_dname_len
[i
];
179 dout("parsed dir dname '%.*s'\n", info
->dir_dname_len
[i
],
181 info
->dir_dlease
[i
] = *p
;
182 *p
+= sizeof(struct ceph_mds_reply_lease
);
185 err
= parse_reply_info_in(p
, end
, &info
->dir_in
[i
]);
200 pr_err("problem parsing dir contents %d\n", err
);
205 * parse fcntl F_GETLK results
207 static int parse_reply_info_filelock(void **p
, void *end
,
208 struct ceph_mds_reply_info_parsed
*info
)
210 if (*p
+ sizeof(*info
->filelock_reply
) > end
)
213 info
->filelock_reply
= *p
;
214 *p
+= sizeof(*info
->filelock_reply
);
216 if (unlikely(*p
!= end
))
225 * parse extra results
227 static int parse_reply_info_extra(void **p
, void *end
,
228 struct ceph_mds_reply_info_parsed
*info
)
230 if (info
->head
->op
== CEPH_MDS_OP_GETFILELOCK
)
231 return parse_reply_info_filelock(p
, end
, info
);
233 return parse_reply_info_dir(p
, end
, info
);
237 * parse entire mds reply
239 static int parse_reply_info(struct ceph_msg
*msg
,
240 struct ceph_mds_reply_info_parsed
*info
)
246 info
->head
= msg
->front
.iov_base
;
247 p
= msg
->front
.iov_base
+ sizeof(struct ceph_mds_reply_head
);
248 end
= p
+ msg
->front
.iov_len
- sizeof(struct ceph_mds_reply_head
);
251 ceph_decode_32_safe(&p
, end
, len
, bad
);
253 err
= parse_reply_info_trace(&p
, p
+len
, info
);
259 ceph_decode_32_safe(&p
, end
, len
, bad
);
261 err
= parse_reply_info_extra(&p
, p
+len
, info
);
267 ceph_decode_32_safe(&p
, end
, len
, bad
);
268 info
->snapblob_len
= len
;
279 pr_err("mds parse_reply err %d\n", err
);
283 static void destroy_reply_info(struct ceph_mds_reply_info_parsed
*info
)
292 static const char *session_state_name(int s
)
295 case CEPH_MDS_SESSION_NEW
: return "new";
296 case CEPH_MDS_SESSION_OPENING
: return "opening";
297 case CEPH_MDS_SESSION_OPEN
: return "open";
298 case CEPH_MDS_SESSION_HUNG
: return "hung";
299 case CEPH_MDS_SESSION_CLOSING
: return "closing";
300 case CEPH_MDS_SESSION_RESTARTING
: return "restarting";
301 case CEPH_MDS_SESSION_RECONNECTING
: return "reconnecting";
302 default: return "???";
306 static struct ceph_mds_session
*get_session(struct ceph_mds_session
*s
)
308 if (atomic_inc_not_zero(&s
->s_ref
)) {
309 dout("mdsc get_session %p %d -> %d\n", s
,
310 atomic_read(&s
->s_ref
)-1, atomic_read(&s
->s_ref
));
313 dout("mdsc get_session %p 0 -- FAIL", s
);
318 void ceph_put_mds_session(struct ceph_mds_session
*s
)
320 dout("mdsc put_session %p %d -> %d\n", s
,
321 atomic_read(&s
->s_ref
), atomic_read(&s
->s_ref
)-1);
322 if (atomic_dec_and_test(&s
->s_ref
)) {
324 s
->s_mdsc
->fsc
->client
->monc
.auth
->ops
->destroy_authorizer(
325 s
->s_mdsc
->fsc
->client
->monc
.auth
,
332 * called under mdsc->mutex
334 struct ceph_mds_session
*__ceph_lookup_mds_session(struct ceph_mds_client
*mdsc
,
337 struct ceph_mds_session
*session
;
339 if (mds
>= mdsc
->max_sessions
|| mdsc
->sessions
[mds
] == NULL
)
341 session
= mdsc
->sessions
[mds
];
342 dout("lookup_mds_session %p %d\n", session
,
343 atomic_read(&session
->s_ref
));
344 get_session(session
);
348 static bool __have_session(struct ceph_mds_client
*mdsc
, int mds
)
350 if (mds
>= mdsc
->max_sessions
)
352 return mdsc
->sessions
[mds
];
355 static int __verify_registered_session(struct ceph_mds_client
*mdsc
,
356 struct ceph_mds_session
*s
)
358 if (s
->s_mds
>= mdsc
->max_sessions
||
359 mdsc
->sessions
[s
->s_mds
] != s
)
365 * create+register a new session for given mds.
366 * called under mdsc->mutex.
368 static struct ceph_mds_session
*register_session(struct ceph_mds_client
*mdsc
,
371 struct ceph_mds_session
*s
;
373 s
= kzalloc(sizeof(*s
), GFP_NOFS
);
375 return ERR_PTR(-ENOMEM
);
378 s
->s_state
= CEPH_MDS_SESSION_NEW
;
381 mutex_init(&s
->s_mutex
);
383 ceph_con_init(mdsc
->fsc
->client
->msgr
, &s
->s_con
);
384 s
->s_con
.private = s
;
385 s
->s_con
.ops
= &mds_con_ops
;
386 s
->s_con
.peer_name
.type
= CEPH_ENTITY_TYPE_MDS
;
387 s
->s_con
.peer_name
.num
= cpu_to_le64(mds
);
389 spin_lock_init(&s
->s_cap_lock
);
392 s
->s_renew_requested
= 0;
394 INIT_LIST_HEAD(&s
->s_caps
);
397 atomic_set(&s
->s_ref
, 1);
398 INIT_LIST_HEAD(&s
->s_waiting
);
399 INIT_LIST_HEAD(&s
->s_unsafe
);
400 s
->s_num_cap_releases
= 0;
401 s
->s_cap_iterator
= NULL
;
402 INIT_LIST_HEAD(&s
->s_cap_releases
);
403 INIT_LIST_HEAD(&s
->s_cap_releases_done
);
404 INIT_LIST_HEAD(&s
->s_cap_flushing
);
405 INIT_LIST_HEAD(&s
->s_cap_snaps_flushing
);
407 dout("register_session mds%d\n", mds
);
408 if (mds
>= mdsc
->max_sessions
) {
409 int newmax
= 1 << get_count_order(mds
+1);
410 struct ceph_mds_session
**sa
;
412 dout("register_session realloc to %d\n", newmax
);
413 sa
= kcalloc(newmax
, sizeof(void *), GFP_NOFS
);
416 if (mdsc
->sessions
) {
417 memcpy(sa
, mdsc
->sessions
,
418 mdsc
->max_sessions
* sizeof(void *));
419 kfree(mdsc
->sessions
);
422 mdsc
->max_sessions
= newmax
;
424 mdsc
->sessions
[mds
] = s
;
425 atomic_inc(&s
->s_ref
); /* one ref to sessions[], one to caller */
427 ceph_con_open(&s
->s_con
, ceph_mdsmap_get_addr(mdsc
->mdsmap
, mds
));
433 return ERR_PTR(-ENOMEM
);
437 * called under mdsc->mutex
439 static void __unregister_session(struct ceph_mds_client
*mdsc
,
440 struct ceph_mds_session
*s
)
442 dout("__unregister_session mds%d %p\n", s
->s_mds
, s
);
443 BUG_ON(mdsc
->sessions
[s
->s_mds
] != s
);
444 mdsc
->sessions
[s
->s_mds
] = NULL
;
445 ceph_con_close(&s
->s_con
);
446 ceph_put_mds_session(s
);
450 * drop session refs in request.
452 * should be last request ref, or hold mdsc->mutex
454 static void put_request_session(struct ceph_mds_request
*req
)
456 if (req
->r_session
) {
457 ceph_put_mds_session(req
->r_session
);
458 req
->r_session
= NULL
;
462 void ceph_mdsc_release_request(struct kref
*kref
)
464 struct ceph_mds_request
*req
= container_of(kref
,
465 struct ceph_mds_request
,
468 ceph_msg_put(req
->r_request
);
470 ceph_msg_put(req
->r_reply
);
471 destroy_reply_info(&req
->r_reply_info
);
474 ceph_put_cap_refs(ceph_inode(req
->r_inode
),
478 if (req
->r_locked_dir
)
479 ceph_put_cap_refs(ceph_inode(req
->r_locked_dir
),
481 if (req
->r_target_inode
)
482 iput(req
->r_target_inode
);
485 if (req
->r_old_dentry
) {
487 ceph_inode(req
->r_old_dentry
->d_parent
->d_inode
),
489 dput(req
->r_old_dentry
);
493 put_request_session(req
);
494 ceph_unreserve_caps(req
->r_mdsc
, &req
->r_caps_reservation
);
499 * lookup session, bump ref if found.
501 * called under mdsc->mutex.
503 static struct ceph_mds_request
*__lookup_request(struct ceph_mds_client
*mdsc
,
506 struct ceph_mds_request
*req
;
507 struct rb_node
*n
= mdsc
->request_tree
.rb_node
;
510 req
= rb_entry(n
, struct ceph_mds_request
, r_node
);
511 if (tid
< req
->r_tid
)
513 else if (tid
> req
->r_tid
)
516 ceph_mdsc_get_request(req
);
523 static void __insert_request(struct ceph_mds_client
*mdsc
,
524 struct ceph_mds_request
*new)
526 struct rb_node
**p
= &mdsc
->request_tree
.rb_node
;
527 struct rb_node
*parent
= NULL
;
528 struct ceph_mds_request
*req
= NULL
;
532 req
= rb_entry(parent
, struct ceph_mds_request
, r_node
);
533 if (new->r_tid
< req
->r_tid
)
535 else if (new->r_tid
> req
->r_tid
)
541 rb_link_node(&new->r_node
, parent
, p
);
542 rb_insert_color(&new->r_node
, &mdsc
->request_tree
);
546 * Register an in-flight request, and assign a tid. Link to directory
547 * are modifying (if any).
549 * Called under mdsc->mutex.
551 static void __register_request(struct ceph_mds_client
*mdsc
,
552 struct ceph_mds_request
*req
,
555 req
->r_tid
= ++mdsc
->last_tid
;
557 ceph_reserve_caps(mdsc
, &req
->r_caps_reservation
,
559 dout("__register_request %p tid %lld\n", req
, req
->r_tid
);
560 ceph_mdsc_get_request(req
);
561 __insert_request(mdsc
, req
);
563 req
->r_uid
= current_fsuid();
564 req
->r_gid
= current_fsgid();
567 struct ceph_inode_info
*ci
= ceph_inode(dir
);
569 spin_lock(&ci
->i_unsafe_lock
);
570 req
->r_unsafe_dir
= dir
;
571 list_add_tail(&req
->r_unsafe_dir_item
, &ci
->i_unsafe_dirops
);
572 spin_unlock(&ci
->i_unsafe_lock
);
576 static void __unregister_request(struct ceph_mds_client
*mdsc
,
577 struct ceph_mds_request
*req
)
579 dout("__unregister_request %p tid %lld\n", req
, req
->r_tid
);
580 rb_erase(&req
->r_node
, &mdsc
->request_tree
);
581 RB_CLEAR_NODE(&req
->r_node
);
583 if (req
->r_unsafe_dir
) {
584 struct ceph_inode_info
*ci
= ceph_inode(req
->r_unsafe_dir
);
586 spin_lock(&ci
->i_unsafe_lock
);
587 list_del_init(&req
->r_unsafe_dir_item
);
588 spin_unlock(&ci
->i_unsafe_lock
);
591 ceph_mdsc_put_request(req
);
595 * Choose mds to send request to next. If there is a hint set in the
596 * request (e.g., due to a prior forward hint from the mds), use that.
597 * Otherwise, consult frag tree and/or caps to identify the
598 * appropriate mds. If all else fails, choose randomly.
600 * Called under mdsc->mutex.
602 struct dentry
*get_nonsnap_parent(struct dentry
*dentry
)
604 while (!IS_ROOT(dentry
) && ceph_snap(dentry
->d_inode
) != CEPH_NOSNAP
)
605 dentry
= dentry
->d_parent
;
609 static int __choose_mds(struct ceph_mds_client
*mdsc
,
610 struct ceph_mds_request
*req
)
613 struct ceph_inode_info
*ci
;
614 struct ceph_cap
*cap
;
615 int mode
= req
->r_direct_mode
;
617 u32 hash
= req
->r_direct_hash
;
618 bool is_hash
= req
->r_direct_is_hash
;
621 * is there a specific mds we should try? ignore hint if we have
622 * no session and the mds is not up (active or recovering).
624 if (req
->r_resend_mds
>= 0 &&
625 (__have_session(mdsc
, req
->r_resend_mds
) ||
626 ceph_mdsmap_get_state(mdsc
->mdsmap
, req
->r_resend_mds
) > 0)) {
627 dout("choose_mds using resend_mds mds%d\n",
629 return req
->r_resend_mds
;
632 if (mode
== USE_RANDOM_MDS
)
637 inode
= req
->r_inode
;
638 } else if (req
->r_dentry
) {
639 struct inode
*dir
= req
->r_dentry
->d_parent
->d_inode
;
641 if (dir
->i_sb
!= mdsc
->fsc
->sb
) {
643 inode
= req
->r_dentry
->d_inode
;
644 } else if (ceph_snap(dir
) != CEPH_NOSNAP
) {
645 /* direct snapped/virtual snapdir requests
646 * based on parent dir inode */
648 get_nonsnap_parent(req
->r_dentry
->d_parent
);
650 dout("__choose_mds using nonsnap parent %p\n", inode
);
651 } else if (req
->r_dentry
->d_inode
) {
653 inode
= req
->r_dentry
->d_inode
;
657 hash
= req
->r_dentry
->d_name
.hash
;
662 dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode
, (int)is_hash
,
666 ci
= ceph_inode(inode
);
668 if (is_hash
&& S_ISDIR(inode
->i_mode
)) {
669 struct ceph_inode_frag frag
;
672 ceph_choose_frag(ci
, hash
, &frag
, &found
);
674 if (mode
== USE_ANY_MDS
&& frag
.ndist
> 0) {
677 /* choose a random replica */
678 get_random_bytes(&r
, 1);
681 dout("choose_mds %p %llx.%llx "
682 "frag %u mds%d (%d/%d)\n",
683 inode
, ceph_vinop(inode
),
689 /* since this file/dir wasn't known to be
690 * replicated, then we want to look for the
691 * authoritative mds. */
694 /* choose auth mds */
696 dout("choose_mds %p %llx.%llx "
697 "frag %u mds%d (auth)\n",
698 inode
, ceph_vinop(inode
), frag
.frag
, mds
);
704 spin_lock(&inode
->i_lock
);
706 if (mode
== USE_AUTH_MDS
)
707 cap
= ci
->i_auth_cap
;
708 if (!cap
&& !RB_EMPTY_ROOT(&ci
->i_caps
))
709 cap
= rb_entry(rb_first(&ci
->i_caps
), struct ceph_cap
, ci_node
);
711 spin_unlock(&inode
->i_lock
);
714 mds
= cap
->session
->s_mds
;
715 dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
716 inode
, ceph_vinop(inode
), mds
,
717 cap
== ci
->i_auth_cap
? "auth " : "", cap
);
718 spin_unlock(&inode
->i_lock
);
722 mds
= ceph_mdsmap_get_random_mds(mdsc
->mdsmap
);
723 dout("choose_mds chose random mds%d\n", mds
);
731 static struct ceph_msg
*create_session_msg(u32 op
, u64 seq
)
733 struct ceph_msg
*msg
;
734 struct ceph_mds_session_head
*h
;
736 msg
= ceph_msg_new(CEPH_MSG_CLIENT_SESSION
, sizeof(*h
), GFP_NOFS
);
738 pr_err("create_session_msg ENOMEM creating msg\n");
741 h
= msg
->front
.iov_base
;
742 h
->op
= cpu_to_le32(op
);
743 h
->seq
= cpu_to_le64(seq
);
748 * send session open request.
750 * called under mdsc->mutex
752 static int __open_session(struct ceph_mds_client
*mdsc
,
753 struct ceph_mds_session
*session
)
755 struct ceph_msg
*msg
;
757 int mds
= session
->s_mds
;
759 /* wait for mds to go active? */
760 mstate
= ceph_mdsmap_get_state(mdsc
->mdsmap
, mds
);
761 dout("open_session to mds%d (%s)\n", mds
,
762 ceph_mds_state_name(mstate
));
763 session
->s_state
= CEPH_MDS_SESSION_OPENING
;
764 session
->s_renew_requested
= jiffies
;
766 /* send connect message */
767 msg
= create_session_msg(CEPH_SESSION_REQUEST_OPEN
, session
->s_seq
);
770 ceph_con_send(&session
->s_con
, msg
);
775 * open sessions for any export targets for the given mds
777 * called under mdsc->mutex
779 static void __open_export_target_sessions(struct ceph_mds_client
*mdsc
,
780 struct ceph_mds_session
*session
)
782 struct ceph_mds_info
*mi
;
783 struct ceph_mds_session
*ts
;
784 int i
, mds
= session
->s_mds
;
787 if (mds
>= mdsc
->mdsmap
->m_max_mds
)
789 mi
= &mdsc
->mdsmap
->m_info
[mds
];
790 dout("open_export_target_sessions for mds%d (%d targets)\n",
791 session
->s_mds
, mi
->num_export_targets
);
793 for (i
= 0; i
< mi
->num_export_targets
; i
++) {
794 target
= mi
->export_targets
[i
];
795 ts
= __ceph_lookup_mds_session(mdsc
, target
);
797 ts
= register_session(mdsc
, target
);
801 if (session
->s_state
== CEPH_MDS_SESSION_NEW
||
802 session
->s_state
== CEPH_MDS_SESSION_CLOSING
)
803 __open_session(mdsc
, session
);
805 dout(" mds%d target mds%d %p is %s\n", session
->s_mds
,
806 i
, ts
, session_state_name(ts
->s_state
));
807 ceph_put_mds_session(ts
);
811 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client
*mdsc
,
812 struct ceph_mds_session
*session
)
814 mutex_lock(&mdsc
->mutex
);
815 __open_export_target_sessions(mdsc
, session
);
816 mutex_unlock(&mdsc
->mutex
);
824 * Free preallocated cap messages assigned to this session
826 static void cleanup_cap_releases(struct ceph_mds_session
*session
)
828 struct ceph_msg
*msg
;
830 spin_lock(&session
->s_cap_lock
);
831 while (!list_empty(&session
->s_cap_releases
)) {
832 msg
= list_first_entry(&session
->s_cap_releases
,
833 struct ceph_msg
, list_head
);
834 list_del_init(&msg
->list_head
);
837 while (!list_empty(&session
->s_cap_releases_done
)) {
838 msg
= list_first_entry(&session
->s_cap_releases_done
,
839 struct ceph_msg
, list_head
);
840 list_del_init(&msg
->list_head
);
843 spin_unlock(&session
->s_cap_lock
);
847 * Helper to safely iterate over all caps associated with a session, with
848 * special care taken to handle a racing __ceph_remove_cap().
850 * Caller must hold session s_mutex.
852 static int iterate_session_caps(struct ceph_mds_session
*session
,
853 int (*cb
)(struct inode
*, struct ceph_cap
*,
857 struct ceph_cap
*cap
;
858 struct inode
*inode
, *last_inode
= NULL
;
859 struct ceph_cap
*old_cap
= NULL
;
862 dout("iterate_session_caps %p mds%d\n", session
, session
->s_mds
);
863 spin_lock(&session
->s_cap_lock
);
864 p
= session
->s_caps
.next
;
865 while (p
!= &session
->s_caps
) {
866 cap
= list_entry(p
, struct ceph_cap
, session_caps
);
867 inode
= igrab(&cap
->ci
->vfs_inode
);
872 session
->s_cap_iterator
= cap
;
873 spin_unlock(&session
->s_cap_lock
);
880 ceph_put_cap(session
->s_mdsc
, old_cap
);
884 ret
= cb(inode
, cap
, arg
);
887 spin_lock(&session
->s_cap_lock
);
889 if (cap
->ci
== NULL
) {
890 dout("iterate_session_caps finishing cap %p removal\n",
892 BUG_ON(cap
->session
!= session
);
893 list_del_init(&cap
->session_caps
);
894 session
->s_nr_caps
--;
896 old_cap
= cap
; /* put_cap it w/o locks held */
903 session
->s_cap_iterator
= NULL
;
904 spin_unlock(&session
->s_cap_lock
);
909 ceph_put_cap(session
->s_mdsc
, old_cap
);
914 static int remove_session_caps_cb(struct inode
*inode
, struct ceph_cap
*cap
,
917 struct ceph_inode_info
*ci
= ceph_inode(inode
);
920 dout("removing cap %p, ci is %p, inode is %p\n",
921 cap
, ci
, &ci
->vfs_inode
);
922 spin_lock(&inode
->i_lock
);
923 __ceph_remove_cap(cap
);
924 if (!__ceph_is_any_real_caps(ci
)) {
925 struct ceph_mds_client
*mdsc
=
926 ceph_sb_to_client(inode
->i_sb
)->mdsc
;
928 spin_lock(&mdsc
->cap_dirty_lock
);
929 if (!list_empty(&ci
->i_dirty_item
)) {
930 pr_info(" dropping dirty %s state for %p %lld\n",
931 ceph_cap_string(ci
->i_dirty_caps
),
932 inode
, ceph_ino(inode
));
933 ci
->i_dirty_caps
= 0;
934 list_del_init(&ci
->i_dirty_item
);
937 if (!list_empty(&ci
->i_flushing_item
)) {
938 pr_info(" dropping dirty+flushing %s state for %p %lld\n",
939 ceph_cap_string(ci
->i_flushing_caps
),
940 inode
, ceph_ino(inode
));
941 ci
->i_flushing_caps
= 0;
942 list_del_init(&ci
->i_flushing_item
);
943 mdsc
->num_cap_flushing
--;
946 if (drop
&& ci
->i_wrbuffer_ref
) {
947 pr_info(" dropping dirty data for %p %lld\n",
948 inode
, ceph_ino(inode
));
949 ci
->i_wrbuffer_ref
= 0;
950 ci
->i_wrbuffer_ref_head
= 0;
953 spin_unlock(&mdsc
->cap_dirty_lock
);
955 spin_unlock(&inode
->i_lock
);
962 * caller must hold session s_mutex
964 static void remove_session_caps(struct ceph_mds_session
*session
)
966 dout("remove_session_caps on %p\n", session
);
967 iterate_session_caps(session
, remove_session_caps_cb
, NULL
);
968 BUG_ON(session
->s_nr_caps
> 0);
969 BUG_ON(!list_empty(&session
->s_cap_flushing
));
970 cleanup_cap_releases(session
);
974 * wake up any threads waiting on this session's caps. if the cap is
975 * old (didn't get renewed on the client reconnect), remove it now.
977 * caller must hold s_mutex.
979 static int wake_up_session_cb(struct inode
*inode
, struct ceph_cap
*cap
,
982 struct ceph_inode_info
*ci
= ceph_inode(inode
);
984 wake_up_all(&ci
->i_cap_wq
);
986 spin_lock(&inode
->i_lock
);
987 ci
->i_wanted_max_size
= 0;
988 ci
->i_requested_max_size
= 0;
989 spin_unlock(&inode
->i_lock
);
994 static void wake_up_session_caps(struct ceph_mds_session
*session
,
997 dout("wake_up_session_caps %p mds%d\n", session
, session
->s_mds
);
998 iterate_session_caps(session
, wake_up_session_cb
,
999 (void *)(unsigned long)reconnect
);
1003 * Send periodic message to MDS renewing all currently held caps. The
1004 * ack will reset the expiration for all caps from this session.
1006 * caller holds s_mutex
1008 static int send_renew_caps(struct ceph_mds_client
*mdsc
,
1009 struct ceph_mds_session
*session
)
1011 struct ceph_msg
*msg
;
1014 if (time_after_eq(jiffies
, session
->s_cap_ttl
) &&
1015 time_after_eq(session
->s_cap_ttl
, session
->s_renew_requested
))
1016 pr_info("mds%d caps stale\n", session
->s_mds
);
1017 session
->s_renew_requested
= jiffies
;
1019 /* do not try to renew caps until a recovering mds has reconnected
1020 * with its clients. */
1021 state
= ceph_mdsmap_get_state(mdsc
->mdsmap
, session
->s_mds
);
1022 if (state
< CEPH_MDS_STATE_RECONNECT
) {
1023 dout("send_renew_caps ignoring mds%d (%s)\n",
1024 session
->s_mds
, ceph_mds_state_name(state
));
1028 dout("send_renew_caps to mds%d (%s)\n", session
->s_mds
,
1029 ceph_mds_state_name(state
));
1030 msg
= create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS
,
1031 ++session
->s_renew_seq
);
1034 ceph_con_send(&session
->s_con
, msg
);
1039 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1041 * Called under session->s_mutex
1043 static void renewed_caps(struct ceph_mds_client
*mdsc
,
1044 struct ceph_mds_session
*session
, int is_renew
)
1049 spin_lock(&session
->s_cap_lock
);
1050 was_stale
= is_renew
&& (session
->s_cap_ttl
== 0 ||
1051 time_after_eq(jiffies
, session
->s_cap_ttl
));
1053 session
->s_cap_ttl
= session
->s_renew_requested
+
1054 mdsc
->mdsmap
->m_session_timeout
*HZ
;
1057 if (time_before(jiffies
, session
->s_cap_ttl
)) {
1058 pr_info("mds%d caps renewed\n", session
->s_mds
);
1061 pr_info("mds%d caps still stale\n", session
->s_mds
);
1064 dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1065 session
->s_mds
, session
->s_cap_ttl
, was_stale
? "stale" : "fresh",
1066 time_before(jiffies
, session
->s_cap_ttl
) ? "stale" : "fresh");
1067 spin_unlock(&session
->s_cap_lock
);
1070 wake_up_session_caps(session
, 0);
1074 * send a session close request
1076 static int request_close_session(struct ceph_mds_client
*mdsc
,
1077 struct ceph_mds_session
*session
)
1079 struct ceph_msg
*msg
;
1081 dout("request_close_session mds%d state %s seq %lld\n",
1082 session
->s_mds
, session_state_name(session
->s_state
),
1084 msg
= create_session_msg(CEPH_SESSION_REQUEST_CLOSE
, session
->s_seq
);
1087 ceph_con_send(&session
->s_con
, msg
);
1092 * Called with s_mutex held.
1094 static int __close_session(struct ceph_mds_client
*mdsc
,
1095 struct ceph_mds_session
*session
)
1097 if (session
->s_state
>= CEPH_MDS_SESSION_CLOSING
)
1099 session
->s_state
= CEPH_MDS_SESSION_CLOSING
;
1100 return request_close_session(mdsc
, session
);
1104 * Trim old(er) caps.
1106 * Because we can't cache an inode without one or more caps, we do
1107 * this indirectly: if a cap is unused, we prune its aliases, at which
1108 * point the inode will hopefully get dropped to.
1110 * Yes, this is a bit sloppy. Our only real goal here is to respond to
1111 * memory pressure from the MDS, though, so it needn't be perfect.
1113 static int trim_caps_cb(struct inode
*inode
, struct ceph_cap
*cap
, void *arg
)
1115 struct ceph_mds_session
*session
= arg
;
1116 struct ceph_inode_info
*ci
= ceph_inode(inode
);
1117 int used
, oissued
, mine
;
1119 if (session
->s_trim_caps
<= 0)
1122 spin_lock(&inode
->i_lock
);
1123 mine
= cap
->issued
| cap
->implemented
;
1124 used
= __ceph_caps_used(ci
);
1125 oissued
= __ceph_caps_issued_other(ci
, cap
);
1127 dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
1128 inode
, cap
, ceph_cap_string(mine
), ceph_cap_string(oissued
),
1129 ceph_cap_string(used
));
1130 if (ci
->i_dirty_caps
)
1131 goto out
; /* dirty caps */
1132 if ((used
& ~oissued
) & mine
)
1133 goto out
; /* we need these caps */
1135 session
->s_trim_caps
--;
1137 /* we aren't the only cap.. just remove us */
1138 __ceph_remove_cap(cap
);
1140 /* try to drop referring dentries */
1141 spin_unlock(&inode
->i_lock
);
1142 d_prune_aliases(inode
);
1143 dout("trim_caps_cb %p cap %p pruned, count now %d\n",
1144 inode
, cap
, atomic_read(&inode
->i_count
));
1149 spin_unlock(&inode
->i_lock
);
1154 * Trim session cap count down to some max number.
1156 static int trim_caps(struct ceph_mds_client
*mdsc
,
1157 struct ceph_mds_session
*session
,
1160 int trim_caps
= session
->s_nr_caps
- max_caps
;
1162 dout("trim_caps mds%d start: %d / %d, trim %d\n",
1163 session
->s_mds
, session
->s_nr_caps
, max_caps
, trim_caps
);
1164 if (trim_caps
> 0) {
1165 session
->s_trim_caps
= trim_caps
;
1166 iterate_session_caps(session
, trim_caps_cb
, session
);
1167 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1168 session
->s_mds
, session
->s_nr_caps
, max_caps
,
1169 trim_caps
- session
->s_trim_caps
);
1170 session
->s_trim_caps
= 0;
1176 * Allocate cap_release messages. If there is a partially full message
1177 * in the queue, try to allocate enough to cover it's remainder, so that
1178 * we can send it immediately.
1180 * Called under s_mutex.
1182 int ceph_add_cap_releases(struct ceph_mds_client
*mdsc
,
1183 struct ceph_mds_session
*session
)
1185 struct ceph_msg
*msg
, *partial
= NULL
;
1186 struct ceph_mds_cap_release
*head
;
1188 int extra
= mdsc
->fsc
->mount_options
->cap_release_safety
;
1191 dout("add_cap_releases %p mds%d extra %d\n", session
, session
->s_mds
,
1194 spin_lock(&session
->s_cap_lock
);
1196 if (!list_empty(&session
->s_cap_releases
)) {
1197 msg
= list_first_entry(&session
->s_cap_releases
,
1200 head
= msg
->front
.iov_base
;
1201 num
= le32_to_cpu(head
->num
);
1203 dout(" partial %p with (%d/%d)\n", msg
, num
,
1204 (int)CEPH_CAPS_PER_RELEASE
);
1205 extra
+= CEPH_CAPS_PER_RELEASE
- num
;
1209 while (session
->s_num_cap_releases
< session
->s_nr_caps
+ extra
) {
1210 spin_unlock(&session
->s_cap_lock
);
1211 msg
= ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE
, PAGE_CACHE_SIZE
,
1215 dout("add_cap_releases %p msg %p now %d\n", session
, msg
,
1216 (int)msg
->front
.iov_len
);
1217 head
= msg
->front
.iov_base
;
1218 head
->num
= cpu_to_le32(0);
1219 msg
->front
.iov_len
= sizeof(*head
);
1220 spin_lock(&session
->s_cap_lock
);
1221 list_add(&msg
->list_head
, &session
->s_cap_releases
);
1222 session
->s_num_cap_releases
+= CEPH_CAPS_PER_RELEASE
;
1226 head
= partial
->front
.iov_base
;
1227 num
= le32_to_cpu(head
->num
);
1228 dout(" queueing partial %p with %d/%d\n", partial
, num
,
1229 (int)CEPH_CAPS_PER_RELEASE
);
1230 list_move_tail(&partial
->list_head
,
1231 &session
->s_cap_releases_done
);
1232 session
->s_num_cap_releases
-= CEPH_CAPS_PER_RELEASE
- num
;
1235 spin_unlock(&session
->s_cap_lock
);
1241 * flush all dirty inode data to disk.
1243 * returns true if we've flushed through want_flush_seq
1245 static int check_cap_flush(struct ceph_mds_client
*mdsc
, u64 want_flush_seq
)
1249 dout("check_cap_flush want %lld\n", want_flush_seq
);
1250 mutex_lock(&mdsc
->mutex
);
1251 for (mds
= 0; ret
&& mds
< mdsc
->max_sessions
; mds
++) {
1252 struct ceph_mds_session
*session
= mdsc
->sessions
[mds
];
1256 get_session(session
);
1257 mutex_unlock(&mdsc
->mutex
);
1259 mutex_lock(&session
->s_mutex
);
1260 if (!list_empty(&session
->s_cap_flushing
)) {
1261 struct ceph_inode_info
*ci
=
1262 list_entry(session
->s_cap_flushing
.next
,
1263 struct ceph_inode_info
,
1265 struct inode
*inode
= &ci
->vfs_inode
;
1267 spin_lock(&inode
->i_lock
);
1268 if (ci
->i_cap_flush_seq
<= want_flush_seq
) {
1269 dout("check_cap_flush still flushing %p "
1270 "seq %lld <= %lld to mds%d\n", inode
,
1271 ci
->i_cap_flush_seq
, want_flush_seq
,
1275 spin_unlock(&inode
->i_lock
);
1277 mutex_unlock(&session
->s_mutex
);
1278 ceph_put_mds_session(session
);
1282 mutex_lock(&mdsc
->mutex
);
1285 mutex_unlock(&mdsc
->mutex
);
1286 dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq
);
1291 * called under s_mutex
1293 void ceph_send_cap_releases(struct ceph_mds_client
*mdsc
,
1294 struct ceph_mds_session
*session
)
1296 struct ceph_msg
*msg
;
1298 dout("send_cap_releases mds%d\n", session
->s_mds
);
1299 spin_lock(&session
->s_cap_lock
);
1300 while (!list_empty(&session
->s_cap_releases_done
)) {
1301 msg
= list_first_entry(&session
->s_cap_releases_done
,
1302 struct ceph_msg
, list_head
);
1303 list_del_init(&msg
->list_head
);
1304 spin_unlock(&session
->s_cap_lock
);
1305 msg
->hdr
.front_len
= cpu_to_le32(msg
->front
.iov_len
);
1306 dout("send_cap_releases mds%d %p\n", session
->s_mds
, msg
);
1307 ceph_con_send(&session
->s_con
, msg
);
1308 spin_lock(&session
->s_cap_lock
);
1310 spin_unlock(&session
->s_cap_lock
);
1313 static void discard_cap_releases(struct ceph_mds_client
*mdsc
,
1314 struct ceph_mds_session
*session
)
1316 struct ceph_msg
*msg
;
1317 struct ceph_mds_cap_release
*head
;
1320 dout("discard_cap_releases mds%d\n", session
->s_mds
);
1321 spin_lock(&session
->s_cap_lock
);
1323 /* zero out the in-progress message */
1324 msg
= list_first_entry(&session
->s_cap_releases
,
1325 struct ceph_msg
, list_head
);
1326 head
= msg
->front
.iov_base
;
1327 num
= le32_to_cpu(head
->num
);
1328 dout("discard_cap_releases mds%d %p %u\n", session
->s_mds
, msg
, num
);
1329 head
->num
= cpu_to_le32(0);
1330 session
->s_num_cap_releases
+= num
;
1332 /* requeue completed messages */
1333 while (!list_empty(&session
->s_cap_releases_done
)) {
1334 msg
= list_first_entry(&session
->s_cap_releases_done
,
1335 struct ceph_msg
, list_head
);
1336 list_del_init(&msg
->list_head
);
1338 head
= msg
->front
.iov_base
;
1339 num
= le32_to_cpu(head
->num
);
1340 dout("discard_cap_releases mds%d %p %u\n", session
->s_mds
, msg
,
1342 session
->s_num_cap_releases
+= num
;
1343 head
->num
= cpu_to_le32(0);
1344 msg
->front
.iov_len
= sizeof(*head
);
1345 list_add(&msg
->list_head
, &session
->s_cap_releases
);
1348 spin_unlock(&session
->s_cap_lock
);
1356 * Create an mds request.
1358 struct ceph_mds_request
*
1359 ceph_mdsc_create_request(struct ceph_mds_client
*mdsc
, int op
, int mode
)
1361 struct ceph_mds_request
*req
= kzalloc(sizeof(*req
), GFP_NOFS
);
1364 return ERR_PTR(-ENOMEM
);
1366 mutex_init(&req
->r_fill_mutex
);
1368 req
->r_started
= jiffies
;
1369 req
->r_resend_mds
= -1;
1370 INIT_LIST_HEAD(&req
->r_unsafe_dir_item
);
1372 kref_init(&req
->r_kref
);
1373 INIT_LIST_HEAD(&req
->r_wait
);
1374 init_completion(&req
->r_completion
);
1375 init_completion(&req
->r_safe_completion
);
1376 INIT_LIST_HEAD(&req
->r_unsafe_item
);
1379 req
->r_direct_mode
= mode
;
1384 * return oldest (lowest) request, tid in request tree, 0 if none.
1386 * called under mdsc->mutex.
1388 static struct ceph_mds_request
*__get_oldest_req(struct ceph_mds_client
*mdsc
)
1390 if (RB_EMPTY_ROOT(&mdsc
->request_tree
))
1392 return rb_entry(rb_first(&mdsc
->request_tree
),
1393 struct ceph_mds_request
, r_node
);
1396 static u64
__get_oldest_tid(struct ceph_mds_client
*mdsc
)
1398 struct ceph_mds_request
*req
= __get_oldest_req(mdsc
);
1406 * Build a dentry's path. Allocate on heap; caller must kfree. Based
1407 * on build_path_from_dentry in fs/cifs/dir.c.
1409 * If @stop_on_nosnap, generate path relative to the first non-snapped
1412 * Encode hidden .snap dirs as a double /, i.e.
1413 * foo/.snap/bar -> foo//bar
1415 char *ceph_mdsc_build_path(struct dentry
*dentry
, int *plen
, u64
*base
,
1418 struct dentry
*temp
;
1423 return ERR_PTR(-EINVAL
);
1427 for (temp
= dentry
; !IS_ROOT(temp
);) {
1428 struct inode
*inode
= temp
->d_inode
;
1429 if (inode
&& ceph_snap(inode
) == CEPH_SNAPDIR
)
1430 len
++; /* slash only */
1431 else if (stop_on_nosnap
&& inode
&&
1432 ceph_snap(inode
) == CEPH_NOSNAP
)
1435 len
+= 1 + temp
->d_name
.len
;
1436 temp
= temp
->d_parent
;
1438 pr_err("build_path corrupt dentry %p\n", dentry
);
1439 return ERR_PTR(-EINVAL
);
1443 len
--; /* no leading '/' */
1445 path
= kmalloc(len
+1, GFP_NOFS
);
1447 return ERR_PTR(-ENOMEM
);
1449 path
[pos
] = 0; /* trailing null */
1450 for (temp
= dentry
; !IS_ROOT(temp
) && pos
!= 0; ) {
1451 struct inode
*inode
= temp
->d_inode
;
1453 if (inode
&& ceph_snap(inode
) == CEPH_SNAPDIR
) {
1454 dout("build_path path+%d: %p SNAPDIR\n",
1456 } else if (stop_on_nosnap
&& inode
&&
1457 ceph_snap(inode
) == CEPH_NOSNAP
) {
1460 pos
-= temp
->d_name
.len
;
1463 strncpy(path
+ pos
, temp
->d_name
.name
,
1468 temp
= temp
->d_parent
;
1470 pr_err("build_path corrupt dentry\n");
1472 return ERR_PTR(-EINVAL
);
1476 pr_err("build_path did not end path lookup where "
1477 "expected, namelen is %d, pos is %d\n", len
, pos
);
1478 /* presumably this is only possible if racing with a
1479 rename of one of the parent directories (we can not
1480 lock the dentries above us to prevent this, but
1481 retrying should be harmless) */
1486 *base
= ceph_ino(temp
->d_inode
);
1488 dout("build_path on %p %d built %llx '%.*s'\n",
1489 dentry
, atomic_read(&dentry
->d_count
), *base
, len
, path
);
1493 static int build_dentry_path(struct dentry
*dentry
,
1494 const char **ppath
, int *ppathlen
, u64
*pino
,
1499 if (ceph_snap(dentry
->d_parent
->d_inode
) == CEPH_NOSNAP
) {
1500 *pino
= ceph_ino(dentry
->d_parent
->d_inode
);
1501 *ppath
= dentry
->d_name
.name
;
1502 *ppathlen
= dentry
->d_name
.len
;
1505 path
= ceph_mdsc_build_path(dentry
, ppathlen
, pino
, 1);
1507 return PTR_ERR(path
);
1513 static int build_inode_path(struct inode
*inode
,
1514 const char **ppath
, int *ppathlen
, u64
*pino
,
1517 struct dentry
*dentry
;
1520 if (ceph_snap(inode
) == CEPH_NOSNAP
) {
1521 *pino
= ceph_ino(inode
);
1525 dentry
= d_find_alias(inode
);
1526 path
= ceph_mdsc_build_path(dentry
, ppathlen
, pino
, 1);
1529 return PTR_ERR(path
);
1536 * request arguments may be specified via an inode *, a dentry *, or
1537 * an explicit ino+path.
1539 static int set_request_path_attr(struct inode
*rinode
, struct dentry
*rdentry
,
1540 const char *rpath
, u64 rino
,
1541 const char **ppath
, int *pathlen
,
1542 u64
*ino
, int *freepath
)
1547 r
= build_inode_path(rinode
, ppath
, pathlen
, ino
, freepath
);
1548 dout(" inode %p %llx.%llx\n", rinode
, ceph_ino(rinode
),
1550 } else if (rdentry
) {
1551 r
= build_dentry_path(rdentry
, ppath
, pathlen
, ino
, freepath
);
1552 dout(" dentry %p %llx/%.*s\n", rdentry
, *ino
, *pathlen
,
1557 *pathlen
= strlen(rpath
);
1558 dout(" path %.*s\n", *pathlen
, rpath
);
1565 * called under mdsc->mutex
1567 static struct ceph_msg
*create_request_message(struct ceph_mds_client
*mdsc
,
1568 struct ceph_mds_request
*req
,
1571 struct ceph_msg
*msg
;
1572 struct ceph_mds_request_head
*head
;
1573 const char *path1
= NULL
;
1574 const char *path2
= NULL
;
1575 u64 ino1
= 0, ino2
= 0;
1576 int pathlen1
= 0, pathlen2
= 0;
1577 int freepath1
= 0, freepath2
= 0;
1583 ret
= set_request_path_attr(req
->r_inode
, req
->r_dentry
,
1584 req
->r_path1
, req
->r_ino1
.ino
,
1585 &path1
, &pathlen1
, &ino1
, &freepath1
);
1591 ret
= set_request_path_attr(NULL
, req
->r_old_dentry
,
1592 req
->r_path2
, req
->r_ino2
.ino
,
1593 &path2
, &pathlen2
, &ino2
, &freepath2
);
1599 len
= sizeof(*head
) +
1600 pathlen1
+ pathlen2
+ 2*(1 + sizeof(u32
) + sizeof(u64
));
1602 /* calculate (max) length for cap releases */
1603 len
+= sizeof(struct ceph_mds_request_release
) *
1604 (!!req
->r_inode_drop
+ !!req
->r_dentry_drop
+
1605 !!req
->r_old_inode_drop
+ !!req
->r_old_dentry_drop
);
1606 if (req
->r_dentry_drop
)
1607 len
+= req
->r_dentry
->d_name
.len
;
1608 if (req
->r_old_dentry_drop
)
1609 len
+= req
->r_old_dentry
->d_name
.len
;
1611 msg
= ceph_msg_new(CEPH_MSG_CLIENT_REQUEST
, len
, GFP_NOFS
);
1613 msg
= ERR_PTR(-ENOMEM
);
1617 msg
->hdr
.tid
= cpu_to_le64(req
->r_tid
);
1619 head
= msg
->front
.iov_base
;
1620 p
= msg
->front
.iov_base
+ sizeof(*head
);
1621 end
= msg
->front
.iov_base
+ msg
->front
.iov_len
;
1623 head
->mdsmap_epoch
= cpu_to_le32(mdsc
->mdsmap
->m_epoch
);
1624 head
->op
= cpu_to_le32(req
->r_op
);
1625 head
->caller_uid
= cpu_to_le32(req
->r_uid
);
1626 head
->caller_gid
= cpu_to_le32(req
->r_gid
);
1627 head
->args
= req
->r_args
;
1629 ceph_encode_filepath(&p
, end
, ino1
, path1
);
1630 ceph_encode_filepath(&p
, end
, ino2
, path2
);
1632 /* make note of release offset, in case we need to replay */
1633 req
->r_request_release_offset
= p
- msg
->front
.iov_base
;
1637 if (req
->r_inode_drop
)
1638 releases
+= ceph_encode_inode_release(&p
,
1639 req
->r_inode
? req
->r_inode
: req
->r_dentry
->d_inode
,
1640 mds
, req
->r_inode_drop
, req
->r_inode_unless
, 0);
1641 if (req
->r_dentry_drop
)
1642 releases
+= ceph_encode_dentry_release(&p
, req
->r_dentry
,
1643 mds
, req
->r_dentry_drop
, req
->r_dentry_unless
);
1644 if (req
->r_old_dentry_drop
)
1645 releases
+= ceph_encode_dentry_release(&p
, req
->r_old_dentry
,
1646 mds
, req
->r_old_dentry_drop
, req
->r_old_dentry_unless
);
1647 if (req
->r_old_inode_drop
)
1648 releases
+= ceph_encode_inode_release(&p
,
1649 req
->r_old_dentry
->d_inode
,
1650 mds
, req
->r_old_inode_drop
, req
->r_old_inode_unless
, 0);
1651 head
->num_releases
= cpu_to_le16(releases
);
1654 msg
->front
.iov_len
= p
- msg
->front
.iov_base
;
1655 msg
->hdr
.front_len
= cpu_to_le32(msg
->front
.iov_len
);
1657 msg
->pages
= req
->r_pages
;
1658 msg
->nr_pages
= req
->r_num_pages
;
1659 msg
->hdr
.data_len
= cpu_to_le32(req
->r_data_len
);
1660 msg
->hdr
.data_off
= cpu_to_le16(0);
1664 kfree((char *)path2
);
1667 kfree((char *)path1
);
1673 * called under mdsc->mutex if error, under no mutex if
1676 static void complete_request(struct ceph_mds_client
*mdsc
,
1677 struct ceph_mds_request
*req
)
1679 if (req
->r_callback
)
1680 req
->r_callback(mdsc
, req
);
1682 complete_all(&req
->r_completion
);
1686 * called under mdsc->mutex
1688 static int __prepare_send_request(struct ceph_mds_client
*mdsc
,
1689 struct ceph_mds_request
*req
,
1692 struct ceph_mds_request_head
*rhead
;
1693 struct ceph_msg
*msg
;
1699 struct ceph_cap
*cap
=
1700 ceph_get_cap_for_mds(ceph_inode(req
->r_inode
), mds
);
1703 req
->r_sent_on_mseq
= cap
->mseq
;
1705 req
->r_sent_on_mseq
= -1;
1707 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req
,
1708 req
->r_tid
, ceph_mds_op_name(req
->r_op
), req
->r_attempts
);
1710 if (req
->r_got_unsafe
) {
1712 * Replay. Do not regenerate message (and rebuild
1713 * paths, etc.); just use the original message.
1714 * Rebuilding paths will break for renames because
1715 * d_move mangles the src name.
1717 msg
= req
->r_request
;
1718 rhead
= msg
->front
.iov_base
;
1720 flags
= le32_to_cpu(rhead
->flags
);
1721 flags
|= CEPH_MDS_FLAG_REPLAY
;
1722 rhead
->flags
= cpu_to_le32(flags
);
1724 if (req
->r_target_inode
)
1725 rhead
->ino
= cpu_to_le64(ceph_ino(req
->r_target_inode
));
1727 rhead
->num_retry
= req
->r_attempts
- 1;
1729 /* remove cap/dentry releases from message */
1730 rhead
->num_releases
= 0;
1731 msg
->hdr
.front_len
= cpu_to_le32(req
->r_request_release_offset
);
1732 msg
->front
.iov_len
= req
->r_request_release_offset
;
1736 if (req
->r_request
) {
1737 ceph_msg_put(req
->r_request
);
1738 req
->r_request
= NULL
;
1740 msg
= create_request_message(mdsc
, req
, mds
);
1742 req
->r_err
= PTR_ERR(msg
);
1743 complete_request(mdsc
, req
);
1744 return PTR_ERR(msg
);
1746 req
->r_request
= msg
;
1748 rhead
= msg
->front
.iov_base
;
1749 rhead
->oldest_client_tid
= cpu_to_le64(__get_oldest_tid(mdsc
));
1750 if (req
->r_got_unsafe
)
1751 flags
|= CEPH_MDS_FLAG_REPLAY
;
1752 if (req
->r_locked_dir
)
1753 flags
|= CEPH_MDS_FLAG_WANT_DENTRY
;
1754 rhead
->flags
= cpu_to_le32(flags
);
1755 rhead
->num_fwd
= req
->r_num_fwd
;
1756 rhead
->num_retry
= req
->r_attempts
- 1;
1759 dout(" r_locked_dir = %p\n", req
->r_locked_dir
);
1764 * send request, or put it on the appropriate wait list.
1766 static int __do_request(struct ceph_mds_client
*mdsc
,
1767 struct ceph_mds_request
*req
)
1769 struct ceph_mds_session
*session
= NULL
;
1773 if (req
->r_err
|| req
->r_got_result
)
1776 if (req
->r_timeout
&&
1777 time_after_eq(jiffies
, req
->r_started
+ req
->r_timeout
)) {
1778 dout("do_request timed out\n");
1783 mds
= __choose_mds(mdsc
, req
);
1785 ceph_mdsmap_get_state(mdsc
->mdsmap
, mds
) < CEPH_MDS_STATE_ACTIVE
) {
1786 dout("do_request no mds or not active, waiting for map\n");
1787 list_add(&req
->r_wait
, &mdsc
->waiting_for_map
);
1791 /* get, open session */
1792 session
= __ceph_lookup_mds_session(mdsc
, mds
);
1794 session
= register_session(mdsc
, mds
);
1795 if (IS_ERR(session
)) {
1796 err
= PTR_ERR(session
);
1800 dout("do_request mds%d session %p state %s\n", mds
, session
,
1801 session_state_name(session
->s_state
));
1802 if (session
->s_state
!= CEPH_MDS_SESSION_OPEN
&&
1803 session
->s_state
!= CEPH_MDS_SESSION_HUNG
) {
1804 if (session
->s_state
== CEPH_MDS_SESSION_NEW
||
1805 session
->s_state
== CEPH_MDS_SESSION_CLOSING
)
1806 __open_session(mdsc
, session
);
1807 list_add(&req
->r_wait
, &session
->s_waiting
);
1812 req
->r_session
= get_session(session
);
1813 req
->r_resend_mds
= -1; /* forget any previous mds hint */
1815 if (req
->r_request_started
== 0) /* note request start time */
1816 req
->r_request_started
= jiffies
;
1818 err
= __prepare_send_request(mdsc
, req
, mds
);
1820 ceph_msg_get(req
->r_request
);
1821 ceph_con_send(&session
->s_con
, req
->r_request
);
1825 ceph_put_mds_session(session
);
1831 complete_request(mdsc
, req
);
1836 * called under mdsc->mutex
1838 static void __wake_requests(struct ceph_mds_client
*mdsc
,
1839 struct list_head
*head
)
1841 struct ceph_mds_request
*req
, *nreq
;
1843 list_for_each_entry_safe(req
, nreq
, head
, r_wait
) {
1844 list_del_init(&req
->r_wait
);
1845 __do_request(mdsc
, req
);
1850 * Wake up threads with requests pending for @mds, so that they can
1851 * resubmit their requests to a possibly different mds.
1853 static void kick_requests(struct ceph_mds_client
*mdsc
, int mds
)
1855 struct ceph_mds_request
*req
;
1858 dout("kick_requests mds%d\n", mds
);
1859 for (p
= rb_first(&mdsc
->request_tree
); p
; p
= rb_next(p
)) {
1860 req
= rb_entry(p
, struct ceph_mds_request
, r_node
);
1861 if (req
->r_got_unsafe
)
1863 if (req
->r_session
&&
1864 req
->r_session
->s_mds
== mds
) {
1865 dout(" kicking tid %llu\n", req
->r_tid
);
1866 put_request_session(req
);
1867 __do_request(mdsc
, req
);
1872 void ceph_mdsc_submit_request(struct ceph_mds_client
*mdsc
,
1873 struct ceph_mds_request
*req
)
1875 dout("submit_request on %p\n", req
);
1876 mutex_lock(&mdsc
->mutex
);
1877 __register_request(mdsc
, req
, NULL
);
1878 __do_request(mdsc
, req
);
1879 mutex_unlock(&mdsc
->mutex
);
1883 * Synchrously perform an mds request. Take care of all of the
1884 * session setup, forwarding, retry details.
1886 int ceph_mdsc_do_request(struct ceph_mds_client
*mdsc
,
1888 struct ceph_mds_request
*req
)
1892 dout("do_request on %p\n", req
);
1894 /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
1896 ceph_get_cap_refs(ceph_inode(req
->r_inode
), CEPH_CAP_PIN
);
1897 if (req
->r_locked_dir
)
1898 ceph_get_cap_refs(ceph_inode(req
->r_locked_dir
), CEPH_CAP_PIN
);
1899 if (req
->r_old_dentry
)
1901 ceph_inode(req
->r_old_dentry
->d_parent
->d_inode
),
1905 mutex_lock(&mdsc
->mutex
);
1906 __register_request(mdsc
, req
, dir
);
1907 __do_request(mdsc
, req
);
1911 __unregister_request(mdsc
, req
);
1912 dout("do_request early error %d\n", err
);
1917 mutex_unlock(&mdsc
->mutex
);
1918 dout("do_request waiting\n");
1919 if (req
->r_timeout
) {
1920 err
= (long)wait_for_completion_killable_timeout(
1921 &req
->r_completion
, req
->r_timeout
);
1925 err
= wait_for_completion_killable(&req
->r_completion
);
1927 dout("do_request waited, got %d\n", err
);
1928 mutex_lock(&mdsc
->mutex
);
1930 /* only abort if we didn't race with a real reply */
1931 if (req
->r_got_result
) {
1932 err
= le32_to_cpu(req
->r_reply_info
.head
->result
);
1933 } else if (err
< 0) {
1934 dout("aborted request %lld with %d\n", req
->r_tid
, err
);
1937 * ensure we aren't running concurrently with
1938 * ceph_fill_trace or ceph_readdir_prepopulate, which
1939 * rely on locks (dir mutex) held by our caller.
1941 mutex_lock(&req
->r_fill_mutex
);
1943 req
->r_aborted
= true;
1944 mutex_unlock(&req
->r_fill_mutex
);
1946 if (req
->r_locked_dir
&&
1947 (req
->r_op
& CEPH_MDS_OP_WRITE
))
1948 ceph_invalidate_dir_request(req
);
1954 mutex_unlock(&mdsc
->mutex
);
1955 dout("do_request %p done, result %d\n", req
, err
);
1960 * Invalidate dir I_COMPLETE, dentry lease state on an aborted MDS
1961 * namespace request.
1963 void ceph_invalidate_dir_request(struct ceph_mds_request
*req
)
1965 struct inode
*inode
= req
->r_locked_dir
;
1966 struct ceph_inode_info
*ci
= ceph_inode(inode
);
1968 dout("invalidate_dir_request %p (I_COMPLETE, lease(s))\n", inode
);
1969 spin_lock(&inode
->i_lock
);
1970 ci
->i_ceph_flags
&= ~CEPH_I_COMPLETE
;
1971 ci
->i_release_count
++;
1972 spin_unlock(&inode
->i_lock
);
1975 ceph_invalidate_dentry_lease(req
->r_dentry
);
1976 if (req
->r_old_dentry
)
1977 ceph_invalidate_dentry_lease(req
->r_old_dentry
);
1983 * We take the session mutex and parse and process the reply immediately.
1984 * This preserves the logical ordering of replies, capabilities, etc., sent
1985 * by the MDS as they are applied to our local cache.
1987 static void handle_reply(struct ceph_mds_session
*session
, struct ceph_msg
*msg
)
1989 struct ceph_mds_client
*mdsc
= session
->s_mdsc
;
1990 struct ceph_mds_request
*req
;
1991 struct ceph_mds_reply_head
*head
= msg
->front
.iov_base
;
1992 struct ceph_mds_reply_info_parsed
*rinfo
; /* parsed reply info */
1995 int mds
= session
->s_mds
;
1997 if (msg
->front
.iov_len
< sizeof(*head
)) {
1998 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2003 /* get request, session */
2004 tid
= le64_to_cpu(msg
->hdr
.tid
);
2005 mutex_lock(&mdsc
->mutex
);
2006 req
= __lookup_request(mdsc
, tid
);
2008 dout("handle_reply on unknown tid %llu\n", tid
);
2009 mutex_unlock(&mdsc
->mutex
);
2012 dout("handle_reply %p\n", req
);
2014 /* correct session? */
2015 if (req
->r_session
!= session
) {
2016 pr_err("mdsc_handle_reply got %llu on session mds%d"
2017 " not mds%d\n", tid
, session
->s_mds
,
2018 req
->r_session
? req
->r_session
->s_mds
: -1);
2019 mutex_unlock(&mdsc
->mutex
);
2024 if ((req
->r_got_unsafe
&& !head
->safe
) ||
2025 (req
->r_got_safe
&& head
->safe
)) {
2026 pr_warning("got a dup %s reply on %llu from mds%d\n",
2027 head
->safe
? "safe" : "unsafe", tid
, mds
);
2028 mutex_unlock(&mdsc
->mutex
);
2031 if (req
->r_got_safe
&& !head
->safe
) {
2032 pr_warning("got unsafe after safe on %llu from mds%d\n",
2034 mutex_unlock(&mdsc
->mutex
);
2038 result
= le32_to_cpu(head
->result
);
2042 * if we're not talking to the authority, send to them
2043 * if the authority has changed while we weren't looking,
2044 * send to new authority
2045 * Otherwise we just have to return an ESTALE
2047 if (result
== -ESTALE
) {
2048 dout("got ESTALE on request %llu", req
->r_tid
);
2049 if (!req
->r_inode
) {
2050 /* do nothing; not an authority problem */
2051 } else if (req
->r_direct_mode
!= USE_AUTH_MDS
) {
2052 dout("not using auth, setting for that now");
2053 req
->r_direct_mode
= USE_AUTH_MDS
;
2054 __do_request(mdsc
, req
);
2055 mutex_unlock(&mdsc
->mutex
);
2058 struct ceph_inode_info
*ci
= ceph_inode(req
->r_inode
);
2059 struct ceph_cap
*cap
=
2060 ceph_get_cap_for_mds(ci
, req
->r_mds
);;
2062 dout("already using auth");
2063 if ((!cap
|| cap
!= ci
->i_auth_cap
) ||
2064 (cap
->mseq
!= req
->r_sent_on_mseq
)) {
2065 dout("but cap changed, so resending");
2066 __do_request(mdsc
, req
);
2067 mutex_unlock(&mdsc
->mutex
);
2071 dout("have to return ESTALE on request %llu", req
->r_tid
);
2076 req
->r_got_safe
= true;
2077 __unregister_request(mdsc
, req
);
2078 complete_all(&req
->r_safe_completion
);
2080 if (req
->r_got_unsafe
) {
2082 * We already handled the unsafe response, now do the
2083 * cleanup. No need to examine the response; the MDS
2084 * doesn't include any result info in the safe
2085 * response. And even if it did, there is nothing
2086 * useful we could do with a revised return value.
2088 dout("got safe reply %llu, mds%d\n", tid
, mds
);
2089 list_del_init(&req
->r_unsafe_item
);
2091 /* last unsafe request during umount? */
2092 if (mdsc
->stopping
&& !__get_oldest_req(mdsc
))
2093 complete_all(&mdsc
->safe_umount_waiters
);
2094 mutex_unlock(&mdsc
->mutex
);
2098 req
->r_got_unsafe
= true;
2099 list_add_tail(&req
->r_unsafe_item
, &req
->r_session
->s_unsafe
);
2102 dout("handle_reply tid %lld result %d\n", tid
, result
);
2103 rinfo
= &req
->r_reply_info
;
2104 err
= parse_reply_info(msg
, rinfo
);
2105 mutex_unlock(&mdsc
->mutex
);
2107 mutex_lock(&session
->s_mutex
);
2109 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds
, tid
);
2115 if (rinfo
->snapblob_len
) {
2116 down_write(&mdsc
->snap_rwsem
);
2117 ceph_update_snap_trace(mdsc
, rinfo
->snapblob
,
2118 rinfo
->snapblob
+ rinfo
->snapblob_len
,
2119 le32_to_cpu(head
->op
) == CEPH_MDS_OP_RMSNAP
);
2120 downgrade_write(&mdsc
->snap_rwsem
);
2122 down_read(&mdsc
->snap_rwsem
);
2125 /* insert trace into our cache */
2126 mutex_lock(&req
->r_fill_mutex
);
2127 err
= ceph_fill_trace(mdsc
->fsc
->sb
, req
, req
->r_session
);
2129 if (result
== 0 && req
->r_op
!= CEPH_MDS_OP_GETFILELOCK
&&
2131 ceph_readdir_prepopulate(req
, req
->r_session
);
2132 ceph_unreserve_caps(mdsc
, &req
->r_caps_reservation
);
2134 mutex_unlock(&req
->r_fill_mutex
);
2136 up_read(&mdsc
->snap_rwsem
);
2138 mutex_lock(&mdsc
->mutex
);
2139 if (!req
->r_aborted
) {
2145 req
->r_got_result
= true;
2148 dout("reply arrived after request %lld was aborted\n", tid
);
2150 mutex_unlock(&mdsc
->mutex
);
2152 ceph_add_cap_releases(mdsc
, req
->r_session
);
2153 mutex_unlock(&session
->s_mutex
);
2155 /* kick calling process */
2156 complete_request(mdsc
, req
);
2158 ceph_mdsc_put_request(req
);
2165 * handle mds notification that our request has been forwarded.
2167 static void handle_forward(struct ceph_mds_client
*mdsc
,
2168 struct ceph_mds_session
*session
,
2169 struct ceph_msg
*msg
)
2171 struct ceph_mds_request
*req
;
2172 u64 tid
= le64_to_cpu(msg
->hdr
.tid
);
2176 void *p
= msg
->front
.iov_base
;
2177 void *end
= p
+ msg
->front
.iov_len
;
2179 ceph_decode_need(&p
, end
, 2*sizeof(u32
), bad
);
2180 next_mds
= ceph_decode_32(&p
);
2181 fwd_seq
= ceph_decode_32(&p
);
2183 mutex_lock(&mdsc
->mutex
);
2184 req
= __lookup_request(mdsc
, tid
);
2186 dout("forward tid %llu to mds%d - req dne\n", tid
, next_mds
);
2187 goto out
; /* dup reply? */
2190 if (req
->r_aborted
) {
2191 dout("forward tid %llu aborted, unregistering\n", tid
);
2192 __unregister_request(mdsc
, req
);
2193 } else if (fwd_seq
<= req
->r_num_fwd
) {
2194 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2195 tid
, next_mds
, req
->r_num_fwd
, fwd_seq
);
2197 /* resend. forward race not possible; mds would drop */
2198 dout("forward tid %llu to mds%d (we resend)\n", tid
, next_mds
);
2200 BUG_ON(req
->r_got_result
);
2201 req
->r_num_fwd
= fwd_seq
;
2202 req
->r_resend_mds
= next_mds
;
2203 put_request_session(req
);
2204 __do_request(mdsc
, req
);
2206 ceph_mdsc_put_request(req
);
2208 mutex_unlock(&mdsc
->mutex
);
2212 pr_err("mdsc_handle_forward decode error err=%d\n", err
);
2216 * handle a mds session control message
2218 static void handle_session(struct ceph_mds_session
*session
,
2219 struct ceph_msg
*msg
)
2221 struct ceph_mds_client
*mdsc
= session
->s_mdsc
;
2224 int mds
= session
->s_mds
;
2225 struct ceph_mds_session_head
*h
= msg
->front
.iov_base
;
2229 if (msg
->front
.iov_len
!= sizeof(*h
))
2231 op
= le32_to_cpu(h
->op
);
2232 seq
= le64_to_cpu(h
->seq
);
2234 mutex_lock(&mdsc
->mutex
);
2235 if (op
== CEPH_SESSION_CLOSE
)
2236 __unregister_session(mdsc
, session
);
2237 /* FIXME: this ttl calculation is generous */
2238 session
->s_ttl
= jiffies
+ HZ
*mdsc
->mdsmap
->m_session_autoclose
;
2239 mutex_unlock(&mdsc
->mutex
);
2241 mutex_lock(&session
->s_mutex
);
2243 dout("handle_session mds%d %s %p state %s seq %llu\n",
2244 mds
, ceph_session_op_name(op
), session
,
2245 session_state_name(session
->s_state
), seq
);
2247 if (session
->s_state
== CEPH_MDS_SESSION_HUNG
) {
2248 session
->s_state
= CEPH_MDS_SESSION_OPEN
;
2249 pr_info("mds%d came back\n", session
->s_mds
);
2253 case CEPH_SESSION_OPEN
:
2254 if (session
->s_state
== CEPH_MDS_SESSION_RECONNECTING
)
2255 pr_info("mds%d reconnect success\n", session
->s_mds
);
2256 session
->s_state
= CEPH_MDS_SESSION_OPEN
;
2257 renewed_caps(mdsc
, session
, 0);
2260 __close_session(mdsc
, session
);
2263 case CEPH_SESSION_RENEWCAPS
:
2264 if (session
->s_renew_seq
== seq
)
2265 renewed_caps(mdsc
, session
, 1);
2268 case CEPH_SESSION_CLOSE
:
2269 if (session
->s_state
== CEPH_MDS_SESSION_RECONNECTING
)
2270 pr_info("mds%d reconnect denied\n", session
->s_mds
);
2271 remove_session_caps(session
);
2272 wake
= 1; /* for good measure */
2273 wake_up_all(&mdsc
->session_close_wq
);
2274 kick_requests(mdsc
, mds
);
2277 case CEPH_SESSION_STALE
:
2278 pr_info("mds%d caps went stale, renewing\n",
2280 spin_lock(&session
->s_cap_lock
);
2281 session
->s_cap_gen
++;
2282 session
->s_cap_ttl
= 0;
2283 spin_unlock(&session
->s_cap_lock
);
2284 send_renew_caps(mdsc
, session
);
2287 case CEPH_SESSION_RECALL_STATE
:
2288 trim_caps(mdsc
, session
, le32_to_cpu(h
->max_caps
));
2292 pr_err("mdsc_handle_session bad op %d mds%d\n", op
, mds
);
2296 mutex_unlock(&session
->s_mutex
);
2298 mutex_lock(&mdsc
->mutex
);
2299 __wake_requests(mdsc
, &session
->s_waiting
);
2300 mutex_unlock(&mdsc
->mutex
);
2305 pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds
,
2306 (int)msg
->front
.iov_len
);
2313 * called under session->mutex.
2315 static void replay_unsafe_requests(struct ceph_mds_client
*mdsc
,
2316 struct ceph_mds_session
*session
)
2318 struct ceph_mds_request
*req
, *nreq
;
2321 dout("replay_unsafe_requests mds%d\n", session
->s_mds
);
2323 mutex_lock(&mdsc
->mutex
);
2324 list_for_each_entry_safe(req
, nreq
, &session
->s_unsafe
, r_unsafe_item
) {
2325 err
= __prepare_send_request(mdsc
, req
, session
->s_mds
);
2327 ceph_msg_get(req
->r_request
);
2328 ceph_con_send(&session
->s_con
, req
->r_request
);
2331 mutex_unlock(&mdsc
->mutex
);
/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	size_t reclen;
	struct ceph_inode_info *ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	char *path;
	int pathlen, err;
	u64 pathbase;
	struct dentry *dentry;

	ci = cap->ci;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
	if (err)
		return err;

	dentry = d_find_alias(inode);
	if (dentry) {
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_dput;
		}
	} else {
		path = NULL;
		pathlen = 0;
		pathbase = 0;
	}
	err = ceph_pagelist_encode_string(pagelist, path, pathlen);
	if (err)
		goto out_free;

	spin_lock(&inode->i_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */

	if (recon_state->flock) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = 0;
		reclen = sizeof(rec.v2);
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
		reclen = sizeof(rec.v1);
	}
	spin_unlock(&inode->i_lock);

	if (recon_state->flock) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_pagelist_cursor trunc_point;

		ceph_pagelist_set_cursor(pagelist, &trunc_point);
		do {
			ceph_count_locks(inode, &num_fcntl_locks,
					 &num_flock_locks);
			rec.v2.flock_len = (2*sizeof(u32) +
					    (num_fcntl_locks+num_flock_locks) *
					    sizeof(struct ceph_filelock));

			/* pre-alloc pagelist */
			ceph_pagelist_truncate(pagelist, &trunc_point);
			err = ceph_pagelist_append(pagelist, &rec, reclen);
			if (!err)
				err = ceph_pagelist_reserve(pagelist,
							    rec.v2.flock_len);
			if (!err)
				err = ceph_encode_locks(inode, pagelist,
							num_fcntl_locks,
							num_flock_locks);
		} while (err == -ENOSPC);
	} else {
		err = ceph_pagelist_append(pagelist, &rec, reclen);
	}

out_free:
	kfree(path);
out_dput:
	dput(dentry);
	return err;
}
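/*
 * Each record appended above is, roughly: the inode number, the path
 * string used to re-anchor the inode in the namespace, and then either
 * the v1 or v2 reconnect struct.  The v2 form is used when the peer
 * advertises CEPH_FEATURE_FLOCK and is followed by the encoded
 * fcntl/flock locks, whose byte count is carried in flock_len.
 */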
/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	struct rb_node *p;
	int mds = session->s_mds;
	int err = -ENOMEM;
	struct ceph_pagelist *pagelist;
	struct ceph_reconnect_state recon_state;

	pr_info("mds%d reconnect start\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;

	ceph_con_open(&session->s_con,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	dout("session %p state %s\n", session,
	     session_state_name(session->s_state));

	/* drop old cap expires; we're about to reestablish that state */
	discard_cap_releases(mdsc, session);

	/* traverse this session's caps */
	err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
	if (err)
		goto fail;

	recon_state.pagelist = pagelist;
	recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
	err = iterate_session_caps(session, encode_caps_cb, &recon_state);
	if (err < 0)
		goto fail;

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;
	}

	reply->pagelist = pagelist;
	if (recon_state.flock)
		reply->hdr.version = cpu_to_le16(2);
	reply->hdr.data_len = cpu_to_le32(pagelist->length);
	reply->nr_pages = calc_pages_for(0, pagelist->length);
	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(pagelist);
	kfree(pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}
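/*
 * The reconnect payload built above is carried in the pagelist: a
 * 32-bit count of caps, one record per cap (from encode_caps_cb), then
 * one ceph_mds_snaprealm_reconnect record per snap realm we know of.
 * The message header version is bumped to 2 only when the
 * flock-capable encoding was used.
 */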
/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{
	int i;
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i] == NULL)
			continue;
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
		     session_state_name(s->s_state));

		if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			if (s->s_state == CEPH_MDS_SESSION_OPENING) {
				/* the session never opened, just close it
				 * out now */
				__wake_requests(mdsc, &s->s_waiting);
				__unregister_session(mdsc, s);
			} else {
				/* just close it */
				mutex_unlock(&mdsc->mutex);
				mutex_lock(&s->s_mutex);
				mutex_lock(&mdsc->mutex);
				ceph_con_close(&s->s_con);
				mutex_unlock(&s->s_mutex);
				s->s_state = CEPH_MDS_SESSION_RESTARTING;
			}

			/* kick any requests waiting on the recovering mds */
			kick_requests(mdsc, i);
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */
		}

		/*
		 * send reconnect?
		 */
		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT) {
			mutex_unlock(&mdsc->mutex);
			send_mds_reconnect(mdsc, s);
			mutex_lock(&mdsc->mutex);
		}

		/*
		 * kick request on any mds that has gone active.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			if (oldstate != CEPH_MDS_STATE_CREATING &&
			    oldstate != CEPH_MDS_STATE_STARTING)
				pr_info("mds%d recovery completed\n",
					s->s_mds);
			kick_requests(mdsc, i);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, 1);
		}
	}

	for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
		s = mdsc->sessions[i];
		if (!s)
			continue;
		if (!ceph_mdsmap_is_laggy(newmap, i))
			continue;
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG ||
		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout(" connecting to export targets of laggy mds%d\n",
			     i);
			__open_export_target_sessions(mdsc, s);
		}
	}
}
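/*
 * In short: an address change for an mds closes the old connection (or
 * drops the whole session if it never opened), RESTARTING sessions get
 * a reconnect once the mds reaches the reconnect state, and any mds
 * that newly goes active has its waiting requests and flushing caps
 * kicked.  The second loop preemptively opens sessions to the export
 * targets of laggy MDSes, presumably so a failover doesn't leave us
 * without somewhere to send metadata requests.
 */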
/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}
static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	int mask;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	mask = le16_to_cpu(h->mask);
	seq = le32_to_cpu(h->seq);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	mutex_lock(&session->s_mutex);

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, mask %d, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), mask, vino.ino, inode,
	     dname.len, dname.name);
	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}
	ci = ceph_inode(inode);

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di && di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di && di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				le32_to_cpu(h->duration_ms) * HZ / 1000;

			di->lease_seq = seq;
			dentry->d_time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
}
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->mask = cpu_to_le16(1);
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}
/*
 * Preemptively release a lease we expect to invalidate anyway.
 * Pass @inode always, @dentry is optional.
 */
void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
			     struct dentry *dentry, int mask)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *session;
	u32 seq;

	BUG_ON(inode == NULL);
	BUG_ON(dentry == NULL);
	BUG_ON(mask == 0);

	/* is dentry lease valid? */
	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (!di || !di->lease_session ||
	    di->lease_session->s_mds < 0 ||
	    di->lease_gen != di->lease_session->s_cap_gen ||
	    !time_before(jiffies, dentry->d_time)) {
		dout("lease_release inode %p dentry %p -- "
		     "no lease on %d\n",
		     inode, dentry, mask);
		spin_unlock(&dentry->d_lock);
		return;
	}

	/* we do have a lease on this dentry; note mds and seq */
	session = ceph_get_mds_session(di->lease_session);
	seq = di->lease_seq;
	__ceph_mdsc_drop_dentry_lease(dentry);
	spin_unlock(&dentry->d_lock);

	dout("lease_release inode %p dentry %p mask %d to mds%d\n",
	     inode, dentry, mask, session->s_mds);
	ceph_mdsc_lease_send_msg(session, inode, dentry,
				 CEPH_MDS_LEASE_RELEASE, seq);
	ceph_put_mds_session(session);
}
/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);
	schedule_delayed_work(&mdsc->delayed_work, hz);
}

static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		ceph_add_cap_releases(mdsc, s);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}
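/*
 * The delayed work above fires every few seconds: it flushes delayed
 * caps, renews caps roughly every session_timeout/4, marks a session
 * HUNG once its s_ttl passes without a reply from the MDS, and pushes
 * any queued cap release messages out on open or hung sessions.
 */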
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL)
		return -ENOMEM;

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->cap_flush_seq = 0;
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	return 0;
}
/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *req;
	struct ceph_fs_client *fsc = mdsc->fsc;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    fsc->client->options->mount_timeout * HZ);

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}
/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();
}
/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if ((req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush;

	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
		return;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	want_flush = mdsc->cap_flush_seq;
	mutex_unlock(&mdsc->mutex);
	dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);

	ceph_flush_dirty_caps(mdsc);

	wait_unsafe_requests(mdsc, want_tid);
	wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
}
/*
 * true if all sessions are closed, or we force unmount
 */
bool done_closing_sessions(struct ceph_mds_client *mdsc)
{
	int i, n = 0;

	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
		return true;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++)
		if (mdsc->sessions[i])
			n++;
	mutex_unlock(&mdsc->mutex);
	return n == 0;
}
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int i;
	struct ceph_fs_client *fsc = mdsc->fsc;
	unsigned long timeout = fsc->client->options->mount_timeout * HZ;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
			   timeout);

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
}

void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	dout("mdsc_destroy %p\n", mdsc);
	ceph_mdsc_stop(mdsc);

	fsc->mdsc = NULL;
	kfree(mdsc);
	dout("mdsc_destroy %p done\n", mdsc);
}
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}
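/*
 * Maps with an epoch at or below what we already hold are ignored;
 * otherwise the new map is swapped in under mdsc->mutex and
 * check_new_map() reconciles our existing sessions with it before any
 * requests waiting for a map are woken.
 */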
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	ceph_put_mds_session(s);
	dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
}
/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warning("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
static int get_authorizer(struct ceph_connection *con,
			  void **buf, int *len, int *proto,
			  void **reply_buf, int *reply_len, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	int ret = 0;

	if (force_new && s->s_authorizer) {
		ac->ops->destroy_authorizer(ac, s->s_authorizer);
		s->s_authorizer = NULL;
	}
	if (s->s_authorizer == NULL) {
		if (ac->ops->create_authorizer) {
			ret = ac->ops->create_authorizer(
				ac, CEPH_ENTITY_TYPE_MDS,
				&s->s_authorizer,
				&s->s_authorizer_buf,
				&s->s_authorizer_buf_len,
				&s->s_authorizer_reply_buf,
				&s->s_authorizer_reply_buf_len);
			if (ret)
				return ret;
		}
	}

	*proto = ac->protocol;
	*buf = s->s_authorizer_buf;
	*len = s->s_authorizer_buf_len;
	*reply_buf = s->s_authorizer_reply_buf;
	*reply_len = s->s_authorizer_reply_buf_len;
	return 0;
}
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	if (ac->ops->invalidate_authorizer)
		ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
};