/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>

#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
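/* Note: GFP_TRY deliberately leaves out __GFP_WAIT, so a failed allocation
 * returns quickly instead of blocking in reclaim; see the "criss-cross"
 * comment in __drbd_alloc_pages() below for why this path must not trigger
 * arbitrary write-out. */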
/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */
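/* For illustration: a chain of three pages A, B, C hangs off a head pointer
 * as  head -> A,  page_private(A) == B,  page_private(B) == C, and
 * page_private(C) == 0, the 0 acting as the end-of-list marker. */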
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)

        tmp = page_chain_next(page);
                        break; /* found sufficient pages */
                /* insufficient pages, don't use any of them. */

        /* add end of list marker for the returned list */
        set_page_private(page, 0);
        /* actual return value, and adjustment of head */
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)

        while ((tmp = page_chain_next(page)))

static int page_chain_free(struct page *page)

        page_chain_for_each_safe(page, tmp) {

static void page_chain_add(struct page **head,
                           struct page *chain_first, struct page *chain_last)

        tmp = page_chain_tail(chain_first, NULL);
        BUG_ON(tmp != chain_last);

        /* add chain to head */
        set_page_private(chain_last, (unsigned long)*head);
static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
                                       unsigned int number)

        struct page *page = NULL;
        struct page *tmp = NULL;
        /* Yes, testing drbd_pp_vacant outside the lock is racy.
         * So what. It saves a spin_lock. */
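        /* The race is benign: a stale drbd_pp_vacant at worst sends us down
         * the alloc_page() path below, or into page_chain_del(), which
         * re-checks the chain length under drbd_pp_lock anyway. */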
        if (drbd_pp_vacant >= number) {
                spin_lock(&drbd_pp_lock);
                page = page_chain_del(&drbd_pp_pool, number);
                drbd_pp_vacant -= number;
                spin_unlock(&drbd_pp_lock);

        /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
         * "criss-cross" setup, that might cause write-out on some other DRBD,
         * which in turn might block on the other node at this very place. */
        for (i = 0; i < number; i++) {
                tmp = alloc_page(GFP_TRY);
                set_page_private(tmp, (unsigned long)page);

        /* Not enough pages immediately available this time.
         * No need to jump around here, drbd_alloc_pages will retry this
         * function "soon". */
        tmp = page_chain_tail(page, NULL);
        spin_lock(&drbd_pp_lock);
        page_chain_add(&drbd_pp_pool, page, tmp);
        spin_unlock(&drbd_pp_lock);
static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
                                           struct list_head *to_be_freed)

        struct drbd_peer_request *peer_req;
        struct list_head *le, *tle;

        /* The EEs are always appended to the end of the list. Since
           they are sent in order over the wire, they have to finish
           in order. As soon as we see the first one that has not finished,
           we can stop examining the list... */
        list_for_each_safe(le, tle, &mdev->net_ee) {
                peer_req = list_entry(le, struct drbd_peer_request, w.list);
                if (drbd_peer_req_has_active_page(peer_req))
                        break;
                list_move(le, to_be_freed);
        }

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)

        LIST_HEAD(reclaimed);
        struct drbd_peer_request *peer_req, *t;

        spin_lock_irq(&mdev->tconn->req_lock);
        reclaim_finished_net_peer_reqs(mdev, &reclaimed);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                drbd_free_net_peer_req(mdev, peer_req);
/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
                              bool retry)

        struct page *page = NULL;
        /* Yes, we may run up to @number over max_buffers. If we
         * follow it strictly, the admin will get it wrong anyways. */
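        /* The check below compares pp_in_use against mxb *before* the
         * allocation, so one request can overshoot: with max_buffers 1000
         * and pp_in_use at 999, a 32-page request still proceeds and
         * pp_in_use ends up at 1031. */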
        nc = rcu_dereference(mdev->tconn->net_conf);
        mxb = nc ? nc->max_buffers : 1000000;

        if (atomic_read(&mdev->pp_in_use) < mxb)
                page = __drbd_alloc_pages(mdev, number);

        while (page == NULL) {
                prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

                drbd_kick_lo_and_reclaim_net(mdev);

                if (atomic_read(&mdev->pp_in_use) < mxb) {
                        page = __drbd_alloc_pages(mdev, number);

                if (signal_pending(current)) {
                        dev_warn(DEV, "drbd_alloc_pages interrupted!\n");

        finish_wait(&drbd_pp_wait, &wait);

        atomic_add(number, &mdev->pp_in_use);
/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)

        atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;

        if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
                i = page_chain_free(page);
        else {
                tmp = page_chain_tail(page, &i);
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                spin_unlock(&drbd_pp_lock);
        }
        i = atomic_sub_return(i, a);
        if (i < 0)
                dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
                         is_net ? "pp_in_use_by_net" : "pp_in_use", i);
        wake_up(&drbd_pp_wait);
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_finish_peer_reqs()
 drbd_wait_ee_list_empty()
*/
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
                    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)

        struct drbd_peer_request *peer_req;
        struct page *page = NULL;
        unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
                return NULL;

        peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
        if (!peer_req) {
                if (!(gfp_mask & __GFP_NOWARN))
                        dev_err(DEV, "%s: allocation failed\n", __func__);

        page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));

        drbd_clear_interval(&peer_req->i);
        peer_req->i.size = data_size;
        peer_req->i.sector = sector;
        peer_req->i.local = false;
        peer_req->i.waiting = false;

        peer_req->epoch = NULL;
        peer_req->w.mdev = mdev;
        peer_req->pages = page;
        atomic_set(&peer_req->pending_bios, 0);
        /*
         * The block_id is opaque to the receiver.  It is not endianness
         * converted, and sent back to the sender unchanged.
         */
        peer_req->block_id = id;

        mempool_free(peer_req, drbd_ee_mempool);
void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
                          int is_net)

        if (peer_req->flags & EE_HAS_DIGEST)
                kfree(peer_req->digest);
        drbd_free_pages(mdev, peer_req->pages, is_net);
        D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
        D_ASSERT(drbd_interval_empty(&peer_req->i));
        mempool_free(peer_req, drbd_ee_mempool);
int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)

        LIST_HEAD(work_list);
        struct drbd_peer_request *peer_req, *t;
        int is_net = list == &mdev->net_ee;

        spin_lock_irq(&mdev->tconn->req_lock);
        list_splice_init(list, &work_list);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
                __drbd_free_peer_req(mdev, peer_req, is_net);
/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)

        LIST_HEAD(work_list);
        LIST_HEAD(reclaimed);
        struct drbd_peer_request *peer_req, *t;

        spin_lock_irq(&mdev->tconn->req_lock);
        reclaim_finished_net_peer_reqs(mdev, &reclaimed);
        list_splice_init(&mdev->done_ee, &work_list);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                drbd_free_net_peer_req(mdev, peer_req);

        /* possible callbacks here:
         * e_end_block, and e_end_resync_block, e_send_superseded.
         * all ignore the last argument.
         */
        list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
                /* list_del not necessary, next/prev members not touched */
                err2 = peer_req->w.cb(&peer_req->w, !!err);
                drbd_free_peer_req(mdev, peer_req);
        }
        wake_up(&mdev->ee_wait);
static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
                                     struct list_head *head)

        /* avoids spin_lock/unlock
         * and calling prepare_to_wait in the fast path */
        while (!list_empty(head)) {
                prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mdev->tconn->req_lock);
                io_schedule();
                finish_wait(&mdev->ee_wait, &wait);
                spin_lock_irq(&mdev->tconn->req_lock);
        }

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
                                    struct list_head *head)

        spin_lock_irq(&mdev->tconn->req_lock);
        _drbd_wait_ee_list_empty(mdev, head);
        spin_unlock_irq(&mdev->tconn->req_lock);
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)

        struct msghdr msg = {
                .msg_iov = (struct iovec *)&iov,
                .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
        };

        rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)

        rv = drbd_recv_short(tconn->data.socket, buf, size, 0);

        if (rv < 0) {
                if (rv == -ECONNRESET)
                        conn_info(tconn, "sock was reset by peer\n");
                else if (rv != -ERESTARTSYS)
                        conn_err(tconn, "sock_recvmsg returned %d\n", rv);
        } else if (rv == 0) {
                if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
                        t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
                        t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
                }
                conn_info(tconn, "sock was shut down by peer\n");
        }

        conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)

        err = drbd_recv(tconn, buf, size);

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)

        err = drbd_recv_all(tconn, buf, size);
        if (err && !signal_pending(current))
                conn_warn(tconn, "short read (expected size %d)\n", (int)size);
/*
 * On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
                            unsigned int rcv)

        /* open coded SO_SNDBUF, SO_RCVBUF */
        if (snd) {
                sock->sk->sk_sndbuf = snd;
                sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rcv) {
                sock->sk->sk_rcvbuf = rcv;
                sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
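/* Setting sk_sndbuf/sk_rcvbuf directly and flagging SOCK_SNDBUF_LOCK /
 * SOCK_RCVBUF_LOCK mirrors what setsockopt(SO_SNDBUF/SO_RCVBUF) does from
 * userspace; the lock bits keep the kernel's buffer auto-tuning from later
 * overriding the configured sizes. */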
static struct socket *drbd_try_connect(struct drbd_tconn *tconn)

        struct sockaddr_in6 src_in6;
        struct sockaddr_in6 peer_in6;
        int err, peer_addr_len, my_addr_len;
        int sndbuf_size, rcvbuf_size, connect_int;
        int disconnect_on_error = 1;

        nc = rcu_dereference(tconn->net_conf);
        sndbuf_size = nc->sndbuf_size;
        rcvbuf_size = nc->rcvbuf_size;
        connect_int = nc->connect_int;

        my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
        memcpy(&src_in6, &tconn->my_addr, my_addr_len);

        if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
                src_in6.sin6_port = 0;
        else
                ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

        peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
        memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);

        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
                               SOCK_STREAM, IPPROTO_TCP, &sock);

        sock->sk->sk_rcvtimeo =
        sock->sk->sk_sndtimeo = connect_int * HZ;
        drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

        /* explicitly bind to the configured IP as source IP
         * for the outgoing connections.
         * This is needed for multihomed hosts and to be
         * able to use lo: interfaces for drbd.
         * Make sure to use 0 as port number, so linux selects
         * a free one dynamically.
         */
        what = "bind before connect";
        err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);

        /* connect may fail, peer not yet available.
         * stay C_WF_CONNECTION, don't go Disconnecting! */
        disconnect_on_error = 0;
        err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

        switch (-err) {
                /* timeout, busy, signal pending */
        case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
        case EINTR: case ERESTARTSYS:
                /* peer not (yet) available, network problem */
        case ECONNREFUSED: case ENETUNREACH:
        case EHOSTDOWN:    case EHOSTUNREACH:
                disconnect_on_error = 0;

        conn_err(tconn, "%s failed, err = %d\n", what, err);

        if (disconnect_on_error)
                conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
struct accept_wait_data {
        struct drbd_tconn *tconn;
        struct socket *s_listen;
        struct completion door_bell;
        void (*original_sk_state_change)(struct sock *sk);
};

static void drbd_incoming_connection(struct sock *sk)

        struct accept_wait_data *ad = sk->sk_user_data;
        void (*state_change)(struct sock *sk);

        state_change = ad->original_sk_state_change;
        if (sk->sk_state == TCP_ESTABLISHED)
                complete(&ad->door_bell);
static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)

        int err, sndbuf_size, rcvbuf_size, my_addr_len;
        struct sockaddr_in6 my_addr;
        struct socket *s_listen;

        nc = rcu_dereference(tconn->net_conf);
        sndbuf_size = nc->sndbuf_size;
        rcvbuf_size = nc->rcvbuf_size;

        my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
        memcpy(&my_addr, &tconn->my_addr, my_addr_len);

        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
                               SOCK_STREAM, IPPROTO_TCP, &s_listen);

        s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
        drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

        what = "bind before listen";
        err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);

        ad->s_listen = s_listen;
        write_lock_bh(&s_listen->sk->sk_callback_lock);
        ad->original_sk_state_change = s_listen->sk->sk_state_change;
        s_listen->sk->sk_state_change = drbd_incoming_connection;
        s_listen->sk->sk_user_data = ad;
        write_unlock_bh(&s_listen->sk->sk_callback_lock);

        err = s_listen->ops->listen(s_listen, 5);

        sock_release(s_listen);

        if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
                conn_err(tconn, "%s failed, err = %d\n", what, err);
                conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
        }
static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)

        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_state_change = ad->original_sk_state_change;
        sk->sk_user_data = NULL;
        write_unlock_bh(&sk->sk_callback_lock);
static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)

        int timeo, connect_int, err = 0;
        struct socket *s_estab = NULL;

        nc = rcu_dereference(tconn->net_conf);
        connect_int = nc->connect_int;

        timeo = connect_int * HZ;
        timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
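        /* Presumably the jitter is there because both peers try to connect
         * and to accept at the same time: without it, two nodes starting in
         * lockstep could keep colliding on every retry. The +/- timeo/7 skew
         * (2/7, i.e. 28.5% peak to peak) breaks that symmetry. */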
        err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);

        err = kernel_accept(ad->s_listen, &s_estab, 0);
        if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
                conn_err(tconn, "accept failed, err = %d\n", err);
                conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
        }

        unregister_state_change(s_estab->sk, ad);
static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
                             enum drbd_packet cmd)

        if (!conn_prepare_command(tconn, sock))
                return -EIO;
        return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)

        unsigned int header_size = drbd_header_size(tconn);
        struct packet_info pi;

        err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
        if (err != header_size) {

        err = decode_header(tconn, tconn->data.rbuf, &pi);
/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)

        rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

        if (rr > 0 || rr == -EAGAIN) {
/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_conf *mdev)

        atomic_set(&mdev->packet_seq, 0);

        mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
                &mdev->tconn->cstate_mutex :
                &mdev->own_state_mutex;

        err = drbd_send_sync_param(mdev);
        if (!err)
                err = drbd_send_sizes(mdev, 0, 0);
        if (!err)
                err = drbd_send_uuids(mdev);
        if (!err)
                err = drbd_send_current_state(mdev);
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        clear_bit(RESIZE_PENDING, &mdev->flags);
        mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_tconn *tconn)

        struct drbd_socket sock, msock;
        struct drbd_conf *mdev;
        int vnr, timeout, h, ok;
        bool discard_my_data;
        enum drbd_state_rv rv;
        struct accept_wait_data ad = {
                .tconn = tconn,
                .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
        };

        clear_bit(DISCONNECT_SENT, &tconn->flags);
        if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
                return -2;

        mutex_init(&sock.mutex);
        sock.sbuf = tconn->data.sbuf;
        sock.rbuf = tconn->data.rbuf;

        mutex_init(&msock.mutex);
        msock.sbuf = tconn->meta.sbuf;
        msock.rbuf = tconn->meta.rbuf;

        /* Assume that the peer only understands protocol 80 until we know better. */
        tconn->agreed_pro_version = 80;

        if (prepare_listen_socket(tconn, &ad))
                return 0;

        s = drbd_try_connect(tconn);
                send_first_packet(tconn, &sock, P_INITIAL_DATA);
        } else if (!msock.socket) {
                clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
                send_first_packet(tconn, &msock, P_INITIAL_META);
        } else {
                conn_err(tconn, "Logic error in conn_connect()\n");
                goto out_release_sockets;
        }

        if (sock.socket && msock.socket) {
                nc = rcu_dereference(tconn->net_conf);
                timeout = nc->ping_timeo * HZ / 10;
                schedule_timeout_interruptible(timeout);
                ok = drbd_socket_okay(&sock.socket);
                ok = drbd_socket_okay(&msock.socket) && ok;
        }

        s = drbd_wait_for_connect(tconn, &ad);
                int fp = receive_first_packet(tconn, s);
                drbd_socket_okay(&sock.socket);
                drbd_socket_okay(&msock.socket);
                        conn_warn(tconn, "initial packet S crossed\n");
                        sock_release(sock.socket);
                        set_bit(RESOLVE_CONFLICTS, &tconn->flags);
                        conn_warn(tconn, "initial packet M crossed\n");
                        sock_release(msock.socket);
                        conn_warn(tconn, "Error receiving initial packet\n");

        if (tconn->cstate <= C_DISCONNECTING)
                goto out_release_sockets;
        if (signal_pending(current)) {
                flush_signals(current);
                if (get_t_state(&tconn->receiver) == EXITING)
                        goto out_release_sockets;
        }

        ok = drbd_socket_okay(&sock.socket);
        ok = drbd_socket_okay(&msock.socket) && ok;

        sock_release(ad.s_listen);

        sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
        msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

        sock.socket->sk->sk_allocation = GFP_NOIO;
        msock.socket->sk->sk_allocation = GFP_NOIO;

        sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
        msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

        /*
         * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
         * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
         * first set it to the P_CONNECTION_FEATURES timeout,
         * which we set to 4x the configured ping_timeout. */

        nc = rcu_dereference(tconn->net_conf);

        sock.socket->sk->sk_sndtimeo =
        sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

        msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
        timeout = nc->timeout * HZ / 10;
        discard_my_data = nc->discard_my_data;

        msock.socket->sk->sk_sndtimeo = timeout;

        /* we don't want delays.
         * we use TCP_CORK where appropriate, though */
        drbd_tcp_nodelay(sock.socket);
        drbd_tcp_nodelay(msock.socket);

        tconn->data.socket = sock.socket;
        tconn->meta.socket = msock.socket;
        tconn->last_received = jiffies;

        h = drbd_do_features(tconn);

        if (tconn->cram_hmac_tfm) {
                /* drbd_request_state(mdev, NS(conn, WFAuth)); */
                switch (drbd_do_auth(tconn)) {
                case -1:
                        conn_err(tconn, "Authentication of peer failed\n");
                case 0:
                        conn_err(tconn, "Authentication of peer failed, trying again.\n");
                }
        }

        tconn->data.socket->sk->sk_sndtimeo = timeout;
        tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

        if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
                return -1;

        set_bit(STATE_SENT, &tconn->flags);

        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                kref_get(&mdev->kref);
                /* Prevent a race between resync-handshake and
                 * being promoted to Primary.
                 *
                 * Grab and release the state mutex, so we know that any current
                 * drbd_set_role() is finished, and any incoming drbd_set_role
                 * will see the STATE_SENT flag, and wait for it to be cleared.
                 */
                mutex_lock(mdev->state_mutex);
                mutex_unlock(mdev->state_mutex);

                if (discard_my_data)
                        set_bit(DISCARD_MY_DATA, &mdev->flags);
                else
                        clear_bit(DISCARD_MY_DATA, &mdev->flags);

                drbd_connected(mdev);
                kref_put(&mdev->kref, &drbd_minor_destroy);
        }

        rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
        if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
                clear_bit(STATE_SENT, &tconn->flags);
        }

        drbd_thread_start(&tconn->asender);

        mutex_lock(&tconn->conf_update);
        /* The discard_my_data flag is a single-shot modifier to the next
         * connection attempt, the handshake of which is now well underway.
         * No need for rcu style copying of the whole struct
         * just to clear a single value. */
        tconn->net_conf->discard_my_data = 0;
        mutex_unlock(&tconn->conf_update);

out_release_sockets:
        sock_release(ad.s_listen);
        sock_release(sock.socket);
        sock_release(msock.socket);
static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)

        unsigned int header_size = drbd_header_size(tconn);

        if (header_size == sizeof(struct p_header100) &&
            *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
                struct p_header100 *h = header;
                if (h->pad != 0) {
                        conn_err(tconn, "Header padding is not zero\n");
                        return -EINVAL;
                }
                pi->vnr = be16_to_cpu(h->volume);
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be32_to_cpu(h->length);
        } else if (header_size == sizeof(struct p_header95) &&
                   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
                struct p_header95 *h = header;
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be32_to_cpu(h->length);
        } else if (header_size == sizeof(struct p_header80) &&
                   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
                struct p_header80 *h = header;
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be16_to_cpu(h->length);
        } else {
                conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
                         be32_to_cpu(*(__be32 *)header),
                         tconn->agreed_pro_version);
                return -EINVAL;
        }
        pi->data = header + header_size;
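        /* The three on-wire header layouts distinguished above, as implied
         * by the field accesses: h80 is 32-bit magic, 16-bit command,
         * 16-bit length; h95 is 16-bit "big" magic, 16-bit command, 32-bit
         * length; h100 is 32-bit magic, 16-bit volume, 16-bit command,
         * 32-bit length. */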
static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)

        void *buffer = tconn->data.rbuf;

        err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));

        err = decode_header(tconn, buffer, pi);
        tconn->last_received = jiffies;
static void drbd_flush(struct drbd_tconn *tconn)

        struct drbd_conf *mdev;

        if (tconn->write_ordering >= WO_bdev_flush) {
                idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                        if (!get_ldev(mdev))
                                continue;
                        kref_get(&mdev->kref);

                        rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
                                                GFP_NOIO, NULL);
                        if (rv) {
                                dev_info(DEV, "local disk flush failed with status %d\n", rv);
                                /* would rather check on EOPNOTSUPP, but that is not reliable.
                                 * don't try again for ANY return value != 0
                                 * if (rv == -EOPNOTSUPP) */
                                drbd_bump_write_ordering(tconn, WO_drain_io);
                        }
                        kref_put(&mdev->kref, &drbd_minor_destroy);
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @tconn:	DRBD connection.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
                                               struct drbd_epoch *epoch,
                                               enum epoch_event ev)

        struct drbd_epoch *next_epoch;
        enum finish_epoch rv = FE_STILL_LIVE;

        spin_lock(&tconn->epoch_lock);

        epoch_size = atomic_read(&epoch->epoch_size);

        switch (ev & ~EV_CLEANUP) {
        case EV_PUT:
                atomic_dec(&epoch->active);
                break;
        case EV_GOT_BARRIER_NR:
                set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
                break;
        case EV_BECAME_LAST:
                break;
        }

        if (epoch_size != 0 &&
            atomic_read(&epoch->active) == 0 &&
            (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
                if (!(ev & EV_CLEANUP)) {
                        spin_unlock(&tconn->epoch_lock);
                        drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
                        spin_lock(&tconn->epoch_lock);
                }
                /* FIXME: dec unacked on connection, once we have
                 * something to count pending connection packets in. */
                if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
                        dec_unacked(epoch->tconn);

                if (tconn->current_epoch != epoch) {
                        next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
                        list_del(&epoch->list);
                        ev = EV_BECAME_LAST | (ev & EV_CLEANUP);

                        if (rv == FE_STILL_LIVE)
                                rv = FE_DESTROYED;
                } else {
                        atomic_set(&epoch->epoch_size, 0);
                        /* atomic_set(&epoch->active, 0); is already zero */
                        if (rv == FE_STILL_LIVE)
                                rv = FE_RECYCLED;
                }

        spin_unlock(&tconn->epoch_lock);
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @tconn:	DRBD connection.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)

        struct disk_conf *dc;
        struct drbd_conf *mdev;
        enum write_ordering_e pwo;
        static char *write_ordering_str[] = {
                [WO_none] = "none",
                [WO_drain_io] = "drain",
                [WO_bdev_flush] = "flush",
        };

        pwo = tconn->write_ordering;
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (!get_ldev_if_state(mdev, D_ATTACHING))
                        continue;
                dc = rcu_dereference(mdev->ldev->disk_conf);

                if (wo == WO_bdev_flush && !dc->disk_flushes)
                        wo = WO_drain_io;
                if (wo == WO_drain_io && !dc->disk_drain)
                        wo = WO_none;

        tconn->write_ordering = wo;
        if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
                conn_info(tconn, "Method to ensure write ordering: %s\n",
                          write_ordering_str[tconn->write_ordering]);
/**
 * drbd_submit_peer_request()
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
                             struct drbd_peer_request *peer_req,
                             const unsigned rw, const int fault_type)

        struct bio *bios = NULL;
        struct page *page = peer_req->pages;
        sector_t sector = peer_req->i.sector;
        unsigned ds = peer_req->i.size;
        unsigned n_bios = 0;
        unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* In most cases, we will only need one bio. But in case the lower
         * level restrictions happen to be different at this offset on this
         * side than those of the sending peer, we may need to submit the
         * request in more than one bio.
         *
         * Plain bio_alloc is good enough here, this is no DRBD internally
         * generated bio, but a bio allocated on behalf of the peer.
         */
        bio = bio_alloc(GFP_NOIO, nr_pages);
        if (!bio) {
                dev_err(DEV, "submit_ee: Allocation of a bio failed\n");

        /* > peer_req->i.sector, unless this is the first bio */
        bio->bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        bio->bi_private = peer_req;
        bio->bi_end_io = drbd_peer_request_endio;

        bio->bi_next = bios;

        page_chain_for_each(page) {
                unsigned len = min_t(unsigned, ds, PAGE_SIZE);
                if (!bio_add_page(bio, page, len, 0)) {
                        /* A single page must always be possible!
                         * But in case it fails anyways,
                         * we deal with it, and complain (below). */
                        if (bio->bi_vcnt == 0) {
                                dev_err(DEV,
                                        "bio_add_page failed for len=%u, "
                                        "bi_vcnt=0 (bi_sector=%llu)\n",
                                        len, (unsigned long long)bio->bi_sector);

        D_ASSERT(page == NULL);

        atomic_set(&peer_req->pending_bios, n_bios);
        do {
                bio = bios;
                bios = bios->bi_next;
                bio->bi_next = NULL;

                drbd_generic_make_request(mdev, fault_type, bio);
        } while (bios);

        while (bios) {
                struct bio *bio = bios;
                bios = bios->bi_next;
                bio_put(bio);
        }
static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
                                             struct drbd_peer_request *peer_req)

        struct drbd_interval *i = &peer_req->i;

        drbd_remove_interval(&mdev->write_requests, i);
        drbd_clear_interval(i);

        /* Wake up any processes waiting for this peer request to complete. */
        wake_up(&mdev->misc_wait);
void conn_wait_active_ee_empty(struct drbd_tconn *tconn)

        struct drbd_conf *mdev;

        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                kref_get(&mdev->kref);
                drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
                kref_put(&mdev->kref, &drbd_minor_destroy);
        }
static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)

        struct p_barrier *p = pi->data;
        struct drbd_epoch *epoch;

        /* FIXME these are unacked on connection,
         * not a specific (peer)device.
         */
        tconn->current_epoch->barrier_nr = p->barrier;
        tconn->current_epoch->tconn = tconn;
        rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);

        /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
         * the activity log, which means it would not be resynced in case the
         * R_PRIMARY crashes now.
         * Therefore we must send the barrier_ack after the barrier request was
         * actually written. */
        switch (tconn->write_ordering) {
                if (rv == FE_RECYCLED)

                /* receiver context, in the writeout path of the other node.
                 * avoid potential distributed deadlock */
                epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
                        conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");

                conn_wait_active_ee_empty(tconn);

                if (atomic_read(&tconn->current_epoch->epoch_size)) {
                        epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);

                conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);

        atomic_set(&epoch->epoch_size, 0);
        atomic_set(&epoch->active, 0);

        spin_lock(&tconn->epoch_lock);
        if (atomic_read(&tconn->current_epoch->epoch_size)) {
                list_add(&epoch->list, &tconn->current_epoch->list);
                tconn->current_epoch = epoch;
        }
        /* The current_epoch got recycled while we allocated this one... */
        spin_unlock(&tconn->epoch_lock);
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
              int data_size) __must_hold(local)

        const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
        struct drbd_peer_request *peer_req;
        void *dig_in = mdev->tconn->int_dig_in;
        void *dig_vv = mdev->tconn->int_dig_vv;
        unsigned long *data;

        if (mdev->tconn->peer_integrity_tfm) {
                dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
                /*
                 * FIXME: Receive the incoming digest into the receive buffer
                 * here, together with its struct p_data?
                 */
                err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
        }

        if (!expect(IS_ALIGNED(data_size, 512)))
                return NULL;
        if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
                return NULL;

        /* even though we trust our peer,
         * we sometimes have to double check. */
        if (sector + (data_size>>9) > capacity) {
                dev_err(DEV, "request from peer beyond end of local disk: "
                        "capacity: %llus < sector: %llus + size: %u\n",
                        (unsigned long long)capacity,
                        (unsigned long long)sector, data_size);
        }

        /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
         * "criss-cross" setup, that might cause write-out on some other DRBD,
         * which in turn might block on the other node at this very place. */
        peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);

        page = peer_req->pages;
        page_chain_for_each(page) {
                unsigned len = min_t(int, ds, PAGE_SIZE);
                err = drbd_recv_all_warn(mdev->tconn, data, len);
                if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
                        dev_err(DEV, "Fault injection: Corrupting data on receive\n");
                        data[0] = data[0] ^ (unsigned long)-1;
                }
                if (err) {
                        drbd_free_peer_req(mdev, peer_req);
                        return NULL;
                }
        }

        if (dgs) {
                drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
                if (memcmp(dig_in, dig_vv, dgs)) {
                        dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
                                (unsigned long long)sector, data_size);
                        drbd_free_peer_req(mdev, peer_req);
                        return NULL;
                }
        }
        mdev->recv_cnt += data_size>>9;
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)

        page = drbd_alloc_pages(mdev, 1, 1);

        unsigned int len = min_t(int, data_size, PAGE_SIZE);
        err = drbd_recv_all_warn(mdev->tconn, data, len);

        drbd_free_pages(mdev, page, 0);
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
                           sector_t sector, int data_size)

        struct bio_vec *bvec;
        int dgs, err, i, expect;
        void *dig_in = mdev->tconn->int_dig_in;
        void *dig_vv = mdev->tconn->int_dig_vv;

        if (mdev->tconn->peer_integrity_tfm) {
                dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
                err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
        }

        /* optimistically update recv_cnt. if receiving fails below,
         * we disconnect anyways, and counters will be reset. */
        mdev->recv_cnt += data_size>>9;

        bio = req->master_bio;
        D_ASSERT(sector == bio->bi_sector);

        bio_for_each_segment(bvec, bio, i) {
                void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
                expect = min_t(int, data_size, bvec->bv_len);
                err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
                kunmap(bvec->bv_page);
                data_size -= expect;
        }

        drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
        if (memcmp(dig_in, dig_vv, dgs)) {
                dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");

        D_ASSERT(data_size == 0);
/*
 * e_end_resync_block() is called in asender context via
 * drbd_finish_peer_reqs().
 */
static int e_end_resync_block(struct drbd_work *w, int unused)

        struct drbd_peer_request *peer_req =
                container_of(w, struct drbd_peer_request, w);
        struct drbd_conf *mdev = w->mdev;
        sector_t sector = peer_req->i.sector;

        D_ASSERT(drbd_interval_empty(&peer_req->i));

        if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
                drbd_set_in_sync(mdev, sector, peer_req->i.size);
                err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
        } else {
                /* Record failure to sync */
                drbd_rs_failed_io(mdev, sector, peer_req->i.size);

                err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
        }
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)

        struct drbd_peer_request *peer_req;

        peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);

        dec_rs_pending(mdev);

        /* corresponding dec_unacked() in e_end_resync_block()
         * respective _drbd_clear_done_ee */

        peer_req->w.cb = e_end_resync_block;

        spin_lock_irq(&mdev->tconn->req_lock);
        list_add(&peer_req->w.list, &mdev->sync_ee);
        spin_unlock_irq(&mdev->tconn->req_lock);

        atomic_add(data_size >> 9, &mdev->rs_sect_ev);
        if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
                return 0;

        /* don't care for the reason here */
        dev_err(DEV, "submit failed, triggering re-connect\n");
        spin_lock_irq(&mdev->tconn->req_lock);
        list_del(&peer_req->w.list);
        spin_unlock_irq(&mdev->tconn->req_lock);

        drbd_free_peer_req(mdev, peer_req);
static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
             sector_t sector, bool missing_ok, const char *func)

        struct drbd_request *req;

        /* Request object according to our peer */
        req = (struct drbd_request *)(unsigned long)id;
        if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
                return req;
        if (!missing_ok) {
                dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
                        (unsigned long)id, (unsigned long long)sector);
        }
static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)

        struct drbd_conf *mdev;
        struct drbd_request *req;
        struct p_data *p = pi->data;

        mdev = vnr_to_mdev(tconn, pi->vnr);

        sector = be64_to_cpu(p->sector);

        spin_lock_irq(&mdev->tconn->req_lock);
        req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
        spin_unlock_irq(&mdev->tconn->req_lock);

        /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
         * special casing it there for the various failure cases.
         * still no race with drbd_fail_pending_reads */
        err = recv_dless_read(mdev, req, sector, pi->size);
        if (!err)
                req_mod(req, DATA_RECEIVED);
        /* else: nothing. handled from drbd_disconnect...
         * I don't think we may complete this just yet
         * in case we are "on-disconnect: freeze" */
static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)

        struct drbd_conf *mdev;
        struct p_data *p = pi->data;

        mdev = vnr_to_mdev(tconn, pi->vnr);

        sector = be64_to_cpu(p->sector);
        D_ASSERT(p->block_id == ID_SYNCER);

        if (get_ldev(mdev)) {
                /* data is submitted to disk within recv_resync_read.
                 * corresponding put_ldev done below on error,
                 * or in drbd_peer_request_endio. */
                err = recv_resync_read(mdev, sector, pi->size);
        } else {
                if (__ratelimit(&drbd_ratelimit_state))
                        dev_err(DEV, "Can not write resync data to local disk.\n");

                err = drbd_drain_block(mdev, pi->size);

                drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
        }

        atomic_add(pi->size >> 9, &mdev->rs_sect_in);
static void restart_conflicting_writes(struct drbd_conf *mdev,
                                       sector_t sector, int size)

        struct drbd_interval *i;
        struct drbd_request *req;

        drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
                req = container_of(i, struct drbd_request, i);
                if (req->rq_state & RQ_LOCAL_PENDING ||
                    !(req->rq_state & RQ_POSTPONED))
                        continue;
                /* as it is RQ_POSTPONED, this will cause it to
                 * be queued on the retry workqueue. */
                __req_mod(req, CONFLICT_RESOLVED, NULL);
        }
/*
 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
 */
static int e_end_block(struct drbd_work *w, int cancel)

        struct drbd_peer_request *peer_req =
                container_of(w, struct drbd_peer_request, w);
        struct drbd_conf *mdev = w->mdev;
        sector_t sector = peer_req->i.sector;

        if (peer_req->flags & EE_SEND_WRITE_ACK) {
                if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
                        pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
                                mdev->state.conn <= C_PAUSED_SYNC_T &&
                                peer_req->flags & EE_MAY_SET_IN_SYNC) ?
                                P_RS_WRITE_ACK : P_WRITE_ACK;
                        err = drbd_send_ack(mdev, pcmd, peer_req);
                        if (pcmd == P_RS_WRITE_ACK)
                                drbd_set_in_sync(mdev, sector, peer_req->i.size);
                } else {
                        err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
                        /* we expect it to be marked out of sync anyways...
                         * maybe assert this? */
                }
        }

        /* we delete from the conflict detection hash _after_ we sent out the
         * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
        if (peer_req->flags & EE_IN_INTERVAL_TREE) {
                spin_lock_irq(&mdev->tconn->req_lock);
                D_ASSERT(!drbd_interval_empty(&peer_req->i));
                drbd_remove_epoch_entry_interval(mdev, peer_req);
                if (peer_req->flags & EE_RESTART_REQUESTS)
                        restart_conflicting_writes(mdev, sector, peer_req->i.size);
                spin_unlock_irq(&mdev->tconn->req_lock);
        } else
                D_ASSERT(drbd_interval_empty(&peer_req->i));

        drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)

        struct drbd_conf *mdev = w->mdev;
        struct drbd_peer_request *peer_req =
                container_of(w, struct drbd_peer_request, w);

        err = drbd_send_ack(mdev, ack, peer_req);

static int e_send_superseded(struct drbd_work *w, int unused)

        return e_send_ack(w, P_SUPERSEDED);

static int e_send_retry_write(struct drbd_work *w, int unused)

        struct drbd_tconn *tconn = w->mdev->tconn;

        return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
                          P_RETRY_WRITE : P_SUPERSEDED);
static bool seq_greater(u32 a, u32 b)

        /*
         * We assume 32-bit wrap-around here.
         * For 24-bit wrap-around, we would have to shift:
         *  a <<= 8; b <<= 8;
         */
        return (s32)a - (s32)b > 0;
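/* Worked example: right after b wraps, a == 0 and b == 0xffffffff;
 * (s32)a - (s32)b == 0 - (-1) == 1 > 0, so a is correctly considered the
 * newer sequence number even though a < b as plain u32 values. */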
static u32 seq_max(u32 a, u32 b)

        return seq_greater(a, b) ? a : b;
static bool need_peer_seq(struct drbd_conf *mdev)

        struct drbd_tconn *tconn = mdev->tconn;

        /*
         * We only need to keep track of the last packet_seq number of our peer
         * if we are in dual-primary mode and we have the resolve-conflicts flag set;
         * see handle_write_conflicts().
         */
        tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;

        return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)

        unsigned int newest_peer_seq;

        if (need_peer_seq(mdev)) {
                spin_lock(&mdev->peer_seq_lock);
                newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
                mdev->peer_seq = newest_peer_seq;
                spin_unlock(&mdev->peer_seq_lock);
                /* wake up only if we actually changed mdev->peer_seq */
                if (peer_seq == newest_peer_seq)
                        wake_up(&mdev->seq_wait);
        }
static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)

        return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
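/* Lengths l1/l2 are in bytes, hence the >>9 to convert to 512-byte sectors:
 * [s1, s1 + (l1>>9)) and [s2, s2 + (l2>>9)) overlap unless one interval ends
 * at or before the other begins. E.g. s1=0, l1=4096 covers sectors 0..7 and
 * does not overlap s2=8, l2=4096, which covers sectors 8..15. */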
/* maybe change sync_ee into interval trees as well? */
static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)

        struct drbd_peer_request *rs_req;

        spin_lock_irq(&mdev->tconn->req_lock);
        list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
                if (overlaps(peer_req->i.sector, peer_req->i.size,
                             rs_req->i.sector, rs_req->i.size)) {
                        rv = true;
                        break;
                }
        }
        spin_unlock_irq(&mdev->tconn->req_lock);
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)

        if (!need_peer_seq(mdev))
                return 0;

        spin_lock(&mdev->peer_seq_lock);
        if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
                mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);

        if (signal_pending(current)) {

        prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
        spin_unlock(&mdev->peer_seq_lock);
        timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
        timeout = schedule_timeout(timeout);
        spin_lock(&mdev->peer_seq_lock);
        dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
        spin_unlock(&mdev->peer_seq_lock);
        finish_wait(&mdev->seq_wait, &wait);
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)

        return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
                (dpf & DP_FUA ? REQ_FUA : 0) |
                (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
                (dpf & DP_DISCARD ? REQ_DISCARD : 0);
static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
                                    int size)

        struct drbd_interval *i;

        drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
                struct drbd_request *req;
                struct bio_and_error m;

                req = container_of(i, struct drbd_request, i);
                if (!(req->rq_state & RQ_POSTPONED))
                        continue;
                req->rq_state &= ~RQ_POSTPONED;
                __req_mod(req, NEG_ACKED, &m);
                spin_unlock_irq(&mdev->tconn->req_lock);
                complete_master_bio(mdev, &m);
                spin_lock_irq(&mdev->tconn->req_lock);
        }
static int handle_write_conflicts(struct drbd_conf *mdev,
                                  struct drbd_peer_request *peer_req)

        struct drbd_tconn *tconn = mdev->tconn;
        bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
        sector_t sector = peer_req->i.sector;
        const unsigned int size = peer_req->i.size;
        struct drbd_interval *i;

        /*
         * Inserting the peer request into the write_requests tree will prevent
         * new conflicting local requests from being added.
         */
        drbd_insert_interval(&mdev->write_requests, &peer_req->i);

        drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
                if (i == &peer_req->i)
                        continue;

                /*
                 * Our peer has sent a conflicting remote request; this
                 * should not happen in a two-node setup. Wait for the
                 * earlier peer request to complete.
                 */
                err = drbd_wait_misc(mdev, i);

                equal = i->sector == sector && i->size == size;
                if (resolve_conflicts) {
                        /*
                         * If the peer request is fully contained within the
                         * overlapping request, it can be considered overwritten
                         * and thus superseded; otherwise, it will be retried
                         * once all overlapping requests have completed.
                         */
                        bool superseded = i->sector <= sector && i->sector +
                                       (i->size >> 9) >= sector + (size >> 9);
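                        /* E.g. an overlapping request covering sectors 0..15
                         * fully contains a peer request covering sectors
                         * 4..11, so the peer request is deemed superseded;
                         * a peer request covering sectors 12..19 sticks out
                         * past it and would be retried instead. */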
                        dev_alert(DEV, "Concurrent writes detected: "
                                       "local=%llus +%u, remote=%llus +%u, "
                                       "assuming %s came first\n",
                                  (unsigned long long)i->sector, i->size,
                                  (unsigned long long)sector, size,
                                  superseded ? "local" : "remote");

                        peer_req->w.cb = superseded ? e_send_superseded :
                                                      e_send_retry_write;
                        list_add_tail(&peer_req->w.list, &mdev->done_ee);
                        wake_asender(mdev->tconn);
                } else {
                        struct drbd_request *req =
                                container_of(i, struct drbd_request, i);

                        dev_alert(DEV, "Concurrent writes detected: "
                                       "local=%llus +%u, remote=%llus +%u\n",
                                  (unsigned long long)i->sector, i->size,
                                  (unsigned long long)sector, size);

                        if (req->rq_state & RQ_LOCAL_PENDING ||
                            !(req->rq_state & RQ_POSTPONED)) {
                                /*
                                 * Wait for the node with the discard flag to
                                 * decide if this request has been superseded
                                 * or needs to be retried.
                                 * Requests that have been superseded will
                                 * disappear from the write_requests tree.
                                 *
                                 * In addition, wait for the conflicting
                                 * request to finish locally before submitting
                                 * the conflicting peer request.
                                 */
                                err = drbd_wait_misc(mdev, &req->i);
                                _conn_request_state(mdev->tconn,
                                                    NS(conn, C_TIMEOUT),
                                                    CS_HARD);
                                fail_postponed_requests(mdev, sector, size);

                        /*
                         * Remember to restart the conflicting requests after
                         * the new peer request has completed.
                         */
                        peer_req->flags |= EE_RESTART_REQUESTS;

        drbd_remove_epoch_entry_interval(mdev, peer_req);
/* mirrored write */
static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)

        struct drbd_conf *mdev;
        struct drbd_peer_request *peer_req;
        struct p_data *p = pi->data;
        u32 peer_seq = be32_to_cpu(p->seq_num);

        mdev = vnr_to_mdev(tconn, pi->vnr);

        if (!get_ldev(mdev)) {
                err = wait_for_and_update_peer_seq(mdev, peer_seq);
                drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
                atomic_inc(&tconn->current_epoch->epoch_size);
                err2 = drbd_drain_block(mdev, pi->size);
        }

        /*
         * Corresponding put_ldev done either below (on various errors), or in
         * drbd_peer_request_endio, if we successfully submit the data at the
         * end of this function.
         */
        sector = be64_to_cpu(p->sector);
        peer_req = read_in_block(mdev, p->block_id, sector, pi->size);

        peer_req->w.cb = e_end_block;

        dp_flags = be32_to_cpu(p->dp_flags);
        rw |= wire_flags_to_bio(mdev, dp_flags);
        if (peer_req->pages == NULL) {
                D_ASSERT(peer_req->i.size == 0);
                D_ASSERT(dp_flags & DP_FLUSH);
        }

        if (dp_flags & DP_MAY_SET_IN_SYNC)
                peer_req->flags |= EE_MAY_SET_IN_SYNC;

        spin_lock(&tconn->epoch_lock);
        peer_req->epoch = tconn->current_epoch;
        atomic_inc(&peer_req->epoch->epoch_size);
        atomic_inc(&peer_req->epoch->active);
        spin_unlock(&tconn->epoch_lock);

        tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
        if (tp) {
                peer_req->flags |= EE_IN_INTERVAL_TREE;
                err = wait_for_and_update_peer_seq(mdev, peer_seq);
                if (err)
                        goto out_interrupted;
                spin_lock_irq(&mdev->tconn->req_lock);
                err = handle_write_conflicts(mdev, peer_req);
                spin_unlock_irq(&mdev->tconn->req_lock);
                if (err == -ENOENT) {
                        goto out_interrupted;
                }
        }

        spin_lock_irq(&mdev->tconn->req_lock);
        list_add(&peer_req->w.list, &mdev->active_ee);
        spin_unlock_irq(&mdev->tconn->req_lock);

        if (mdev->state.conn == C_SYNC_TARGET)
                wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));

        if (mdev->tconn->agreed_pro_version < 100) {
                switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
                case DRBD_PROT_C:
                        dp_flags |= DP_SEND_WRITE_ACK;
                        break;
                case DRBD_PROT_B:
                        dp_flags |= DP_SEND_RECEIVE_ACK;
                        break;
                }
        }

        if (dp_flags & DP_SEND_WRITE_ACK) {
                peer_req->flags |= EE_SEND_WRITE_ACK;

                /* corresponding dec_unacked() in e_end_block()
                 * respective _drbd_clear_done_ee */
        }

        if (dp_flags & DP_SEND_RECEIVE_ACK) {
                /* I really don't like it that the receiver thread
                 * sends on the msock, but anyways */
                drbd_send_ack(mdev, P_RECV_ACK, peer_req);
        }

        if (mdev->state.pdsk < D_INCONSISTENT) {
                /* In case we have the only disk of the cluster, */
                drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
                peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
                peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
                drbd_al_begin_io(mdev, &peer_req->i);
        }

        err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);

        /* don't care for the reason here */
        dev_err(DEV, "submit failed, triggering re-connect\n");
        spin_lock_irq(&mdev->tconn->req_lock);
        list_del(&peer_req->w.list);
        drbd_remove_epoch_entry_interval(mdev, peer_req);
        spin_unlock_irq(&mdev->tconn->req_lock);
        if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
                drbd_al_complete_io(mdev, &peer_req->i);

out_interrupted:
        drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
        drbd_free_peer_req(mdev, peer_req);
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;
	unsigned int c_min_rate;

	rcu_read_lock();
	c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
	rcu_read_unlock();

	/* feature disabled? */
	if (c_min_rate == 0)
		return 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
			atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > c_min_rate)
			throttle = 1;
	}
	return throttle;
}
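/* Worked example for the throttle decision above, assuming one bitmap bit
 * covers 4 KiB (the Bit2KB() scaling): if the chosen step mark is two
 * seconds old (dt = 2) and 25600 bits were cleared since (db = 25600),
 * then dbdt = Bit2KB(25600/2) = 51200 KiB/s; with e.g. c_min_rate = 250
 * KiB/s that resync is fast enough to be throttled.  A standalone sketch
 * of the same arithmetic (hypothetical helper):
 */
#if 0
static unsigned long resync_rate_kib(unsigned long db, unsigned long dt)
{
	if (dt == 0)
		dt = 1;			/* mirrors the dt++ guard above */
	return (db / dt) * 4;		/* 4 KiB per bitmap bit */
}
#endif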
static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	sector_t sector;
	sector_t capacity;
	struct drbd_peer_request *peer_req;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;
	capacity = drbd_get_capacity(mdev->this_bdev);

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return -EINVAL;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return -EINVAL;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		verb = 1;
		switch (pi->cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			BUG();
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, pi->size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!peer_req) {
		put_ldev(mdev);
		return -ENOMEM;
	}

	switch (pi->cmd) {
	case P_DATA_REQUEST:
		peer_req->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		peer_req->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = pi->size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		peer_req->digest = di;
		peer_req->flags |= EE_HAS_DIGEST;

		if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
			goto out_free_e;

		if (pi->cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
			peer_req->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (pi->cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			peer_req->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->tconn->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		peer_req->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		BUG();
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time.  For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways. */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through.  The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only. */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	list_add_tail(&peer_req->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
		return 0;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_peer_req(mdev, peer_req);
	return -EIO;
}
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;
	enum drbd_after_sb_p after_sb_0p;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	rcu_read_lock();
	after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
	rcu_read_unlock();
	switch (after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
	case ASB_VIOLENTLY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv =  1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
			 "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
			/* Well, then use something else. */
			rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
	}

	return rv;
}
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;
	enum drbd_after_sb_p after_sb_1p;

	rcu_read_lock();
	after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
	rcu_read_unlock();
	switch (after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_DISCARD_ZERO_CHG:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;
	enum drbd_after_sb_p after_sb_2p;

	rcu_read_lock();
	after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
	rcu_read_unlock();
	switch (after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_DISCARD_ZERO_CHG:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}
static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}
/* Return codes of drbd_uuid_compare():
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_move_history(mdev);
				mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
				mdev->ldev->md.uuid[UI_BITMAP] = 0;

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->tconn->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
		    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];

			dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
			drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->tconn->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
		    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			__drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			__drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}

	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}
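/* A sketch of how a caller maps the comparison result to a connection
 * state (drbd_sync_handshake() below is the authoritative consumer; this
 * hypothetical helper only restates the table above):
 */
#if 0
static enum drbd_conns hg_to_conn(int hg)
{
	if (hg >= 1)
		return C_WF_BITMAP_S;	/* we become sync source */
	if (hg <= -1)
		return C_WF_BITMAP_T;	/* we become sync target */
	return C_CONNECTED;		/* no sync necessary */
}
#endif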
/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;
	struct net_conf *nc;
	int hg, rule_nr, rr_conflict, tentative;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");

	spin_lock_irq(&mdev->ldev->md.uuid_lock);
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);
	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);

	if (hg == 100 || (hg == -100 && nc->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}
	rr_conflict = nc->rr_conflict;
	tentative = nc->tentative;
	rcu_read_unlock();

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
			     "assumption\n");
		}
	}

	if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}
static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if (peer == ASB_DISCARD_REMOTE)
		return ASB_DISCARD_LOCAL;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_LOCAL)
		return ASB_DISCARD_REMOTE;

	/* everything else is valid if they are equal on both sides. */
	return peer;
}
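/* Example: a peer configured with after-sb-0pri "discard-remote" arrives
 * here as ASB_DISCARD_REMOTE and converts to ASB_DISCARD_LOCAL, which is
 * exactly what our side must have configured for the pair to be
 * compatible.  receive_protocol() below relies on this:
 *
 *	if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p)
 *		goto disconnect_rcu_unlock;
 */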
static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_protocol *p = pi->data;
	enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_proto, p_discard_my_data, p_two_primaries, cf;
	struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
	char integrity_alg[SHARED_SECRET_MAX] = "";
	struct crypto_hash *peer_integrity_tfm = NULL;
	void *int_dig_in = NULL, *int_dig_vv = NULL;

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_discard_my_data = cf & CF_DISCARD_MY_DATA;

	if (tconn->agreed_pro_version >= 87) {
		int err;

		if (pi->size > sizeof(integrity_alg))
			return -EIO;
		err = drbd_recv_all(tconn, integrity_alg, pi->size);
		if (err)
			return err;
		integrity_alg[SHARED_SECRET_MAX - 1] = 0;
	}

	if (pi->cmd != P_PROTOCOL_UPDATE) {
		clear_bit(CONN_DRY_RUN, &tconn->flags);

		if (cf & CF_DRY_RUN)
			set_bit(CONN_DRY_RUN, &tconn->flags);

		rcu_read_lock();
		nc = rcu_dereference(tconn->net_conf);

		if (p_proto != nc->wire_protocol) {
			conn_err(tconn, "incompatible %s settings\n", "protocol");
			goto disconnect_rcu_unlock;
		}

		if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
			conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
			goto disconnect_rcu_unlock;
		}

		if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
			conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
			goto disconnect_rcu_unlock;
		}

		if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
			conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
			goto disconnect_rcu_unlock;
		}

		if (p_discard_my_data && nc->discard_my_data) {
			conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
			goto disconnect_rcu_unlock;
		}

		if (p_two_primaries != nc->two_primaries) {
			conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
			goto disconnect_rcu_unlock;
		}

		if (strcmp(integrity_alg, nc->integrity_alg)) {
			conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
			goto disconnect_rcu_unlock;
		}

		rcu_read_unlock();
	}

	if (integrity_alg[0]) {
		int hash_size;

		/*
		 * We can only change the peer data integrity algorithm
		 * here.  Changing our own data integrity algorithm
		 * requires that we send a P_PROTOCOL_UPDATE packet at
		 * the same time; otherwise, the peer has no way to
		 * tell between which packets the algorithm should
		 * change.
		 */

		peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (!peer_integrity_tfm) {
			conn_err(tconn, "peer data-integrity-alg %s not supported\n",
				 integrity_alg);
			goto disconnect;
		}

		hash_size = crypto_hash_digestsize(peer_integrity_tfm);
		int_dig_in = kmalloc(hash_size, GFP_KERNEL);
		int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
		if (!(int_dig_in && int_dig_vv)) {
			conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
			goto disconnect;
		}
	}

	new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_net_conf) {
		conn_err(tconn, "Allocation of new net_conf failed\n");
		goto disconnect;
	}

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->conf_update);
	old_net_conf = tconn->net_conf;
	*new_net_conf = *old_net_conf;

	new_net_conf->wire_protocol = p_proto;
	new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
	new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
	new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
	new_net_conf->two_primaries = p_two_primaries;

	rcu_assign_pointer(tconn->net_conf, new_net_conf);
	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);

	crypto_free_hash(tconn->peer_integrity_tfm);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	tconn->peer_integrity_tfm = peer_integrity_tfm;
	tconn->int_dig_in = int_dig_in;
	tconn->int_dig_vv = int_dig_vv;

	if (strcmp(old_net_conf->integrity_alg, integrity_alg))
		conn_info(tconn, "peer data-integrity-alg: %s\n",
			  integrity_alg[0] ? integrity_alg : "(none)");

	synchronize_rcu();
	kfree(old_net_conf);
	return 0;

disconnect_rcu_unlock:
	rcu_read_unlock();
disconnect:
	crypto_free_hash(peer_integrity_tfm);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	return -EIO;
}
/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	return tfm;
}
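/* Typical use, as in receive_SyncParam() below: allocate, then test with
 * IS_ERR(); a NULL return means "no algorithm configured" and is not an
 * error, only ERR_PTR() returns are. */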
static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
{
	void *buffer = tconn->data.rbuf;
	int size = pi->size;

	while (size) {
		int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
		s = drbd_recv(tconn, buffer, s);
		if (s <= 0) {
			if (s < 0)
				return s;
			break;
		}
		size -= s;
	}
	if (size)
		return -EIO;
	return 0;
}
/*
 * config_unknown_volume  -  device configuration command for unknown volume
 *
 * When a device is added to an existing connection, the node on which the
 * device is added first will send configuration commands to its peer but the
 * peer will not know about the device yet. It will warn and ignore these
 * commands. Once the device is added on the second node, the second node will
 * send the same device configuration commands, but in the other direction.
 *
 * (We can also end up here if drbd is misconfigured.)
 */
static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
{
	conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
		  cmdname(pi->cmd), pi->vnr);
	return ignore_remaining_packet(tconn, pi);
}
static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_rs_param_95 *p;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct net_conf *old_net_conf, *new_net_conf = NULL;
	struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
	const int apv = tconn->agreed_pro_version;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int fifo_size = 0;
	int err;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return config_unknown_volume(tconn, pi);

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (pi->size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    pi->size, exp_max_sz);
		return -EIO;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param);
		data_size = pi->size - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89);
		data_size = pi->size - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95);
		data_size = pi->size - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	p = pi->data;
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	err = drbd_recv_all(mdev->tconn, p, header_size);
	if (err)
		return err;

	mutex_lock(&mdev->tconn->conf_update);
	old_net_conf = mdev->tconn->net_conf;
	if (get_ldev(mdev)) {
		new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			put_ldev(mdev);
			mutex_unlock(&mdev->tconn->conf_update);
			dev_err(DEV, "Allocation of new disk_conf failed\n");
			return -ENOMEM;
		}

		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;

		new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
	}

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX || data_size == 0) {
				dev_err(DEV, "verify-alg of wrong size, "
					"peer wants %u, accepting only up to %u byte\n",
					data_size, SHARED_SECRET_MAX);
				err = -EIO;
				goto reconnect;
			}

			err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
			if (err)
				goto reconnect;
			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    old_net_conf->verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    old_net_conf->csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94 && new_disk_conf) {
			new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
			new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
			new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s->size) {
				new_plan = fifo_alloc(fifo_size);
				if (!new_plan) {
					dev_err(DEV, "kmalloc of fifo_buffer failed");
					put_ldev(mdev);
					goto disconnect;
				}
			}
		}

		if (verify_tfm || csums_tfm) {
			new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
			if (!new_net_conf) {
				dev_err(DEV, "Allocation of new net_conf failed\n");
				goto disconnect;
			}

			*new_net_conf = *old_net_conf;

			if (verify_tfm) {
				strcpy(new_net_conf->verify_alg, p->verify_alg);
				new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
				crypto_free_hash(mdev->tconn->verify_tfm);
				mdev->tconn->verify_tfm = verify_tfm;
				dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
			}
			if (csums_tfm) {
				strcpy(new_net_conf->csums_alg, p->csums_alg);
				new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
				crypto_free_hash(mdev->tconn->csums_tfm);
				mdev->tconn->csums_tfm = csums_tfm;
				dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
			}
			rcu_assign_pointer(tconn->net_conf, new_net_conf);
		}
	}

	if (new_disk_conf) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		put_ldev(mdev);
	}

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);
	synchronize_rcu();
	if (new_net_conf)
		kfree(old_net_conf);
	kfree(old_disk_conf);
	kfree(old_plan);

	return 0;

reconnect:
	if (new_disk_conf) {
		put_ldev(mdev);
		kfree(new_disk_conf);
	}
	mutex_unlock(&mdev->tconn->conf_update);
	return -EIO;

disconnect:
	kfree(new_plan);
	if (new_disk_conf) {
		put_ldev(mdev);
		kfree(new_disk_conf);
	}
	mutex_unlock(&mdev->tconn->conf_update);
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	return -EIO;
}
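/* Packet layout by agreed protocol version (apv), as parsed above:
 *   apv <= 87: struct p_rs_param (resync rate only)
 *   apv == 88: p_rs_param plus a verify_alg string as payload
 *   apv 89-94: struct p_rs_param_89 (verify_alg and csums_alg inline)
 *   apv >= 95: struct p_rs_param_95 (adds the c_* resync controller knobs)
 */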
/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
		     (unsigned long long)a, (unsigned long long)b);
}
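/* Example: with a = 1000 and b = 800 sectors, d = 200 exceeds a>>3 = 125,
 * so the 20% difference is reported; with b = 900 (10%), d = 100 exceeds
 * neither a>>3 = 125 nor b>>3 = 112, and nothing is logged. */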
static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_sizes *p = pi->data;
	enum determine_dev_size dd = unchanged;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return config_unknown_volume(tconn, pi);

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		rcu_read_lock();
		my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
		rcu_read_unlock();

		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, my_usize);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero(my_usize, p_usize);

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
			put_ldev(mdev);
			return -EIO;
		}

		if (my_usize != p_usize) {
			struct disk_conf *old_disk_conf, *new_disk_conf = NULL;

			new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
			if (!new_disk_conf) {
				dev_err(DEV, "Allocation of new disk_conf failed\n");
				put_ldev(mdev);
				return -ENOMEM;
			}

			mutex_lock(&mdev->tconn->conf_update);
			old_disk_conf = mdev->ldev->disk_conf;
			*new_disk_conf = *old_disk_conf;
			new_disk_conf->disk_size = p_usize;

			rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
			mutex_unlock(&mdev->tconn->conf_update);
			synchronize_rcu();
			kfree(old_disk_conf);

			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
				 (unsigned long)my_usize);
		}

		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determine_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return -EIO;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
	drbd_reconsider_max_bio_size(mdev);

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return 0;
}
static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_uuids *p = pi->data;
	u64 *p_uuid;
	int i, updated_uuids = 0;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return config_unknown_volume(tconn, pi);

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid) {
		dev_err(DEV, "kmalloc of p_uuid failed\n");
		return false;
	}

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		return -EIO;
	}

	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->tconn->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids",
					BM_LOCKED_TEST_ALLOWED);
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
			updated_uuids = 1;
		}
		put_ldev(mdev);
	} else if (mdev->state.disk < D_INCONSISTENT &&
		   mdev->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	}

	/* Before we test for the disk state, we should wait until any ongoing
	   cluster wide state change has finished. That is important if we are
	   primary and are detaching from our disk. We need to see the new
	   disk state... */
	mutex_lock(mdev->state_mutex);
	mutex_unlock(mdev->state_mutex);
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	if (updated_uuids)
		drbd_print_uuids(mdev, "receiver updated UUIDs to");

	return 0;
}
/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
		[C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S]       = C_VERIFY_T,
		[C_MASK]   = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}
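/* Example: the peer reports { role:Primary, peer:Secondary, disk:UpToDate,
 * pdsk:Inconsistent, conn:StartingSyncS }.  Converted to our point of view
 * this becomes { role:Secondary, peer:Primary, disk:Inconsistent,
 * pdsk:UpToDate, conn:StartingSyncT }. */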
static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_req_state *p = pi->data;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
	    mutex_is_locked(mdev->state_mutex)) {
		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
		return 0;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
	drbd_send_sr_reply(mdev, rv);

	drbd_md_sync(mdev);

	return 0;
}
static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_req_state *p = pi->data;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
	    mutex_is_locked(&tconn->cstate_mutex)) {
		conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
		return 0;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
	conn_send_sr_reply(tconn, rv);

	return 0;
}
static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_state *p = pi->data;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return config_unknown_volume(tconn, pi);

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&mdev->tconn->req_lock);
 retry:
	os = ns = drbd_read_state(mdev);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* If some other part of the code (asender thread, timeout)
	 * already decided to close the connection again,
	 * we must not "re-establish" it here. */
	if (os.conn <= C_TEAR_DOWN)
		return -ECONNRESET;

	/* If this is the "end of sync" confirmation, usually the peer disk
	 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
	 * set) resync started in PausedSyncT, or if the timing of pause-/
	 * unpause-sync events has been "just right", the peer disk may
	 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
	 */
	if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
	    real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
				drbd_resync_finished(mdev);
			return 0;
		}
	}

	/* explicit verify finished notification, stop sector reached. */
	if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
	    peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
		ov_out_of_sync_print(mdev);
		drbd_resync_finished(mdev);
		return 0;
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen, if the peer is a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (peer_state.conn == C_AHEAD)
		ns.conn = C_BEHIND;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
				(peer_state.conn >= C_STARTING_SYNC_S &&
				 peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
					return -EIO;
				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
				conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
				return -EIO;
			}
		}
	}

	spin_lock_irq(&mdev->tconn->req_lock);
	if (os.i != drbd_read_state(mdev).i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
		   for temporary network outages! */
		spin_unlock_irq(&mdev->tconn->req_lock);
		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(mdev->tconn);
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
		return -EIO;
	}
	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
	ns = drbd_read_state(mdev);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS) {
		conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		return -EIO;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_current_state(mdev);
		}
	}

	clear_bit(DISCARD_MY_DATA, &mdev->flags);

	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */

	return 0;
}
static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_rs_uuid *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn == C_BEHIND ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_print_uuids(mdev, "updated sync uuid");
		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return 0;
}
/**
 * receive_bitmap_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
		     unsigned long *p, struct bm_xfer_ctx *c)
{
	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
				 drbd_header_size(mdev->tconn);
	unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
				       c->bm_words - c->word_offset);
	unsigned int want = num_words * sizeof(*p);
	int err;

	if (want != size) {
		dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
		return -EIO;
	}
	if (want == 0)
		return 0;
	err = drbd_recv_all(mdev->tconn, p, want);
	if (err)
		return err;

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return 1;
}
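/* Sizing example (assuming a 4096 byte socket buffer and an 8 byte
 * header): data_size = 4088, so one plain bitmap packet carries up to
 * 511 64-bit words, i.e. 32704 bitmap bits, per iteration. */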
static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
{
	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}

static int dcbp_get_start(struct p_compressed_bm *p)
{
	return (p->encoding & 0x80) != 0;
}

static int dcbp_get_pad_bits(struct p_compressed_bm *p)
{
	return (p->encoding >> 4) & 0x7;
}
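/* Layout of the p_compressed_bm encoding byte, as decoded above:
 *
 *   bit  7    : whether the first decoded run consists of set bits
 *   bits 6..4 : number of padding bits at the end of the bit stream
 *   bits 3..0 : the drbd_bitmap_code, currently only RLE_VLI_Bits
 */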
/**
 * recv_bm_rle_bits
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
recv_bm_rle_bits(struct drbd_conf *mdev,
		 struct p_compressed_bm *p,
		 struct bm_xfer_ctx *c,
		 unsigned int len)
{
	struct bitstream bs;
	u64 look_ahead;
	u64 rl;
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int toggle = dcbp_get_start(p);
	int have;
	int bits;

	bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));

	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return -EIO;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return -EIO;

		if (toggle) {
			e = s + rl - 1;
			if (e >= c->bm_bits) {
				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return -EIO;
			}
			_drbd_bm_set_bits(mdev, s, e);
		}

		if (have < bits) {
			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return -EIO;
		}
		look_ahead >>= bits;
		have -= bits;

		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return -EIO;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	return (s != c->bm_bits);
}
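/* Decoding example: with start = 1 and run lengths 5, 3, 2 the stream
 * sets bits 0..4, leaves bits 5..7 clear, and sets bits 8..9; the toggle
 * flips after every run, so only every other run sets bits. */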
/**
 * decode_bitmap_c
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c,
		unsigned int len)
{
	if (dcbp_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
	return -EIO;
}
void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned int header_size = drbd_header_size(mdev->tconn);
	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
	unsigned int plain =
		header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
		c->bm_words * sizeof(unsigned long);
	unsigned int total = c->bytes[0] + c->bytes[1];
	unsigned int r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
				    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}
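/* Example: plain = 100000 bytes, but only total = 2500 bytes were sent;
 * r = 1000 * 2500 / 100000 = 25, then 1000 - r = 975 is reported as
 * "compression: 97.5%". */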
/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter whether we process it in 32 bit chunks or 64 bit
   chunks as long as it is little endian. (Understand it as byte stream,
   beginning with the lowest byte...) If we would use big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bits issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct bm_xfer_ctx c;
	int err;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
	/* you are supposed to send additional out-of-sync information
	 * if you actually set bits during this phase */

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	for(;;) {
		if (pi->cmd == P_BITMAP)
			err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
		else if (pi->cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p = pi->data;

			if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				err = -EIO;
				goto out;
			}
			if (pi->size <= sizeof(*p)) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
				err = -EIO;
				goto out;
			}
			err = drbd_recv_all(mdev->tconn, p, pi->size);
			if (err)
				goto out;
			err = decode_bitmap_c(mdev, p, &c, pi->size);
		} else {
			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
			err = -EIO;
			goto out;
		}

		c.packets[pi->cmd == P_BITMAP]++;
		c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;

		if (err <= 0) {
			if (err < 0)
				goto out;
			break;
		}
		err = drbd_recv_header(mdev->tconn, pi);
		if (err)
			goto out;
	}

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		enum drbd_state_rv rv;

		err = drbd_send_bitmap(mdev);
		if (err)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(rv == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
		    drbd_conn_str(mdev->state.conn));
	}
	err = 0;

 out:
	drbd_bm_unlock(mdev);
	if (!err && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	return err;
}
static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
	conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
		  pi->cmd, pi->size);

	return ignore_remaining_packet(tconn, pi);
}

static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(tconn->data.socket);

	return 0;
}
static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_desc *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	switch (mdev->state.conn) {
	case C_WF_SYNC_UUID:
	case C_WF_BITMAP_T:
	case C_BEHIND:
			break;
	default:
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
				drbd_conn_str(mdev->state.conn));
	}

	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));

	return 0;
}
struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	int (*fn)(struct drbd_tconn *, struct packet_info *);
};

static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply },
	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier },
	[P_BITMAP]	    = { 1, 0, receive_bitmap },
	[P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap },
	[P_UNPLUG_REMOTE]   = { 0, 0, receive_UnplugRemote },
	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_SYNC_PARAM]	    = { 1, 0, receive_SyncParam },
	[P_SYNC_PARAM89]    = { 1, 0, receive_SyncParam },
	[P_PROTOCOL]	    = { 1, sizeof(struct p_protocol), receive_protocol },
	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
	[P_SYNC_UUID]	    = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
	[P_OV_REQUEST]	    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_OV_REPLY]	    = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_DELAY_PROBE]	    = { 0, sizeof(struct p_delay_probe93), receive_skip },
	[P_OUT_OF_SYNC]	    = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
	[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
	[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
};
static void drbdd(struct drbd_tconn *tconn)
{
	struct packet_info pi;
	size_t shs; /* sub header size */
	int err;

	while (get_t_state(&tconn->receiver) == RUNNING) {
		struct data_cmd *cmd;

		drbd_thread_current_set_cpu(&tconn->receiver);
		if (drbd_recv_header(tconn, &pi))
			goto err_out;

		cmd = &drbd_cmd_handler[pi.cmd];
		if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
			conn_err(tconn, "Unexpected data packet %s (0x%04x)",
				 cmdname(pi.cmd), pi.cmd);
			goto err_out;
		}

		shs = cmd->pkt_size;
		if (pi.size > shs && !cmd->expect_payload) {
			conn_err(tconn, "No payload expected %s l:%d\n",
				 cmdname(pi.cmd), pi.size);
			goto err_out;
		}

		if (shs) {
			err = drbd_recv_all_warn(tconn, pi.data, shs);
			if (err)
				goto err_out;
			pi.size -= shs;
		}

		err = cmd->fn(tconn, &pi);
		if (err) {
			conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
				 cmdname(pi.cmd), err, pi.size);
			goto err_out;
		}
	}
	return;

    err_out:
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
void conn_flush_workqueue(struct drbd_tconn *tconn)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	barr.w.tconn = tconn;
	init_completion(&barr.done);
	drbd_queue_work(&tconn->sender_work, &barr.w);
	wait_for_completion(&barr.done);
}
static void conn_disconnect(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	enum drbd_conns oc;
	int vnr;

	if (tconn->cstate == C_STANDALONE)
		return;

	/* We are about to start the cleanup after connection loss.
	 * Make sure drbd_make_request knows about that.
	 * Usually we should be in some network failure state already,
	 * but just in case we are not, we fix it up here.
	 */
	conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&tconn->asender);
	drbd_free_sock(tconn);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();
		drbd_disconnected(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();

	if (!list_empty(&tconn->current_epoch->list))
		conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&tconn->current_epoch->epoch_size, 0);
	tconn->send.seen_any_write_yet = false;

	conn_info(tconn, "Connection closed\n");

	if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
		conn_try_outdate_peer_async(tconn);

	spin_lock_irq(&tconn->req_lock);
	oc = tconn->cstate;
	if (oc >= C_UNCONNECTED)
		_conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	spin_unlock_irq(&tconn->req_lock);

	if (oc == C_DISCONNECTING)
		conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
}
static int drbd_disconnected(struct drbd_conf *mdev)
{
	unsigned int i;

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	drbd_finish_peer_reqs(mdev);

	/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
	   might have issued a work again. The one before drbd_finish_peer_reqs() is
	   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
	drbd_flush_workqueue(mdev);

	/* need to do it again, drbd_finish_peer_reqs() may have populated it
	 * again via drbd_try_clear_on_disk_bm(). */
	drbd_rs_cancel_all(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!drbd_suspended(mdev))
		tl_clear(mdev->tconn);

	drbd_md_sync(mdev);

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	return 0;
}
/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_features(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	struct p_connection_features *p;

	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_features(struct drbd_tconn *tconn)
{
	/* ASSERT current == tconn->receiver ... */
	struct p_connection_features *p;
	const int expect = sizeof(struct p_connection_features);
	struct packet_info pi;
	int err;

	err = drbd_send_features(tconn);
	if (err)
		return 0;

	err = drbd_recv_header(tconn, &pi);
	if (err)
		return 0;

	if (pi.cmd != P_CONNECTION_FEATURES) {
		conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		return -1;
	}

	if (pi.size != expect) {
		conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
			 expect, pi.size);
		return -1;
	}

	p = pi.data;
	err = drbd_recv_all_warn(tconn, p, expect);
	if (err)
		return 0;

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	conn_info(tconn, "Handshake successful: "
		  "Agreed network protocol version %d\n", tconn->agreed_pro_version);

	return 1;

 incompat:
	conn_err(tconn, "incompatible DRBD dialects: "
		 "I support %d-%d, peer supports %d-%d\n",
		 PRO_VERSION_MIN, PRO_VERSION_MAX,
		 p->protocol_min, p->protocol_max);
	return -1;
}
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_tconn *tconn)
{
	conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

static int drbd_do_auth(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len;
	char secret[SHARED_SECRET_MAX]; /* 64 byte */
	unsigned int resp_size;
	struct hash_desc desc;
	struct packet_info pi;
	struct net_conf *nc;
	int err, rv;

	/* FIXME: Put the challenge/response into the preallocated socket buffer.  */

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	key_len = strlen(nc->shared_secret);
	memcpy(secret, nc->shared_secret, key_len);
	rcu_read_unlock();

	desc.tfm = tconn->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
	if (rv) {
		conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	sock = &tconn->data;
	if (!conn_prepare_command(tconn, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
				my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (pi.cmd != P_AUTH_CHALLENGE) {
		conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}

	if (pi.size > CHALLENGE_LEN * 2) {
		conn_err(tconn, "expected AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(pi.size, GFP_NOIO);
	if (peers_ch == NULL) {
		conn_err(tconn, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
	if (err) {
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		conn_err(tconn, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, pi.size);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	if (!conn_prepare_command(tconn, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
				response, resp_size);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (pi.cmd != P_AUTH_RESPONSE) {
		conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}

	if (pi.size != resp_size) {
		conn_err(tconn, "expected AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	err = drbd_recv_all_warn(tconn, response, resp_size);
	if (err) {
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		conn_err(tconn, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
			  resp_size);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif
int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	int h;

	conn_info(tconn, "receiver (re)started\n");

	do {
		h = conn_connect(tconn);
		if (h == 0) {
			conn_disconnect(tconn);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			conn_warn(tconn, "Discarding network configuration.\n");
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	} while (h == 0);

	if (h > 0)
		drbdd(tconn);

	conn_disconnect(tconn);

	conn_info(tconn, "receiver terminated\n");
	return 0;
}
/* ********* acknowledge sender ******** */

static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
	} else {
		set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
		conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
			 drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&tconn->ping_wait);

	return 0;
}
static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
		D_ASSERT(tconn->agreed_pro_version < 100);
		return got_conn_RqSReply(tconn, pi);
	}

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return 0;
}
static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return drbd_send_ping_ack(tconn);
}
static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	/* restore idle timeout */
	tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
		wake_up(&tconn->ping_wait);

	return 0;
}
static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	D_ASSERT(mdev->tconn->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return 0;
}
static int
validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
			      struct rb_root *root, const char *func,
			      enum drbd_req_event what, bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->tconn->req_lock);
		return -EIO;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return 0;
}
static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return 0;
	}
	switch (pi->cmd) {
	case P_RS_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER_AND_SIS;
		break;
	case P_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER;
		break;
	case P_RECV_ACK:
		what = RECV_ACKED_BY_PEER;
		break;
	case P_SUPERSEDED:
		what = CONFLICT_RESOLVED;
		break;
	case P_RETRY_WRITE:
		what = POSTPONE_WRITE;
		break;
	default:
		BUG();
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->write_requests, __func__,
					     what, false);
}
static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	int err;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return 0;
	}

	err = validate_req_change_req_state(mdev, p->block_id, sector,
					    &mdev->write_requests, __func__,
					    NEG_ACKED, true);
	if (err) {
		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
		   The master bio might already be completed, therefore the
		   request is no longer in the collision hash. */
		/* In Protocol B we might already have got a P_RECV_ACK
		   but then get a P_NEG_ACK afterwards. */
		drbd_set_out_of_sync(mdev, sector, size);
	}
	return 0;
}
static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->read_requests, __func__,
					     NEG_ACKED, false);
}
static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	sector_t sector;
	int size;
	struct p_block_ack *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (pi->cmd) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
		case P_RS_CANCEL:
			break;
		default:
			BUG();
		}
		put_ldev(mdev);
	}

	return 0;
}
static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_barrier_ack *p = pi->data;
	struct drbd_conf *mdev;
	int vnr;

	tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_AHEAD &&
		    atomic_read(&mdev->ap_in_flight) == 0 &&
		    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
			mdev->start_resync_timer.expires = jiffies + HZ;
			add_timer(&mdev->start_resync_timer);
		}
	}
	rcu_read_unlock();

	return 0;
}
static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	struct drbd_work *w;
	sector_t sector;
	int size;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_out_of_sync_found(mdev, sector, size);
	else
		ov_out_of_sync_print(mdev);

	if (!get_ldev(mdev))
		return 0;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			w->mdev = mdev;
			drbd_queue_work(&mdev->tconn->sender_work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_out_of_sync_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return 0;
}
static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return 0;
}
static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr, not_empty = 0;

	do {
		clear_bit(SIGNAL_ASENDER, &tconn->flags);
		flush_signals(current);

		rcu_read_lock();
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			kref_get(&mdev->kref);
			rcu_read_unlock();
			if (drbd_finish_peer_reqs(mdev)) {
				kref_put(&mdev->kref, &drbd_minor_destroy);
				return 1;
			}
			kref_put(&mdev->kref, &drbd_minor_destroy);
			rcu_read_lock();
		}
		set_bit(SIGNAL_ASENDER, &tconn->flags);

		spin_lock_irq(&tconn->req_lock);
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			not_empty = !list_empty(&mdev->done_ee);
			if (not_empty)
				break;
		}
		spin_unlock_irq(&tconn->req_lock);
		rcu_read_unlock();
	} while (not_empty);

	return 0;
}
struct asender_cmd {
	size_t pkt_size;
	int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
};

static struct asender_cmd asender_tbl[] = {
	[P_PING]	    = { 0, got_Ping },
	[P_PING_ACK]	    = { 0, got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_SUPERSEDED]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]	    = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
	[P_RETRY_WRITE]	    = { sizeof(struct p_block_ack), got_BlockAck },
};
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct asender_cmd *cmd = NULL;
	struct packet_info pi;
	int rv;
	void *buf    = tconn->meta.rbuf;
	int received = 0;
	unsigned int header_size = drbd_header_size(tconn);
	int expect   = header_size;
	bool ping_timeout_active = false;
	struct net_conf *nc;
	int ping_timeo, tcp_cork, ping_int;

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(thi);

		rcu_read_lock();
		nc = rcu_dereference(tconn->net_conf);
		ping_timeo = nc->ping_timeo;
		tcp_cork = nc->tcp_cork;
		ping_int = nc->ping_int;
		rcu_read_unlock();

		if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
			if (drbd_send_ping(tconn)) {
				conn_err(tconn, "drbd_send_ping has failed\n");
				goto reconnect;
			}
			tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
			ping_timeout_active = true;
		}

		/* TODO: conditionally cork; it may hurt latency if we cork without
		   much to send */
		if (tcp_cork)
			drbd_tcp_cork(tconn->meta.socket);
		if (tconn_finish_peer_reqs(tconn)) {
			conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
			goto reconnect;
		}
		/* but unconditionally uncork unless disabled */
		if (tcp_cork)
			drbd_tcp_uncork(tconn->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &tconn->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
				long t;
				rcu_read_lock();
				t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
				rcu_read_unlock();

				t = wait_event_timeout(tconn->ping_wait,
						       tconn->cstate < C_WF_REPORT_PARAMS,
						       t);
				if (t)
					break;
			}
			conn_err(tconn, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(tconn->last_received,
				jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				conn_err(tconn, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &tconn->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (decode_header(tconn, tconn->meta.rbuf, &pi))
				goto reconnect;
			cmd = &asender_tbl[pi.cmd];
			if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
				conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
					 cmdname(pi.cmd), pi.cmd);
				goto disconnect;
			}
			expect = header_size + cmd->pkt_size;
			if (pi.size != expect - header_size) {
				conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
					 pi.cmd, pi.size);
				goto reconnect;
			}
		}
		if (received == expect) {
			bool err;

			err = cmd->fn(tconn, &pi);
			if (err) {
				conn_err(tconn, "%pf failed\n", cmd->fn);
				goto reconnect;
			}

			tconn->last_received = jiffies;

			if (cmd == &asender_tbl[P_PING_ACK]) {
				/* restore idle timeout */
				tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
				ping_timeout_active = false;
			}

			buf	 = tconn->meta.rbuf;
			received = 0;
			expect	 = header_size;
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
		conn_md_sync(tconn);
	}
	if (0) {
disconnect:
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}
	clear_bit(SIGNAL_ASENDER, &tconn->flags);

	conn_info(tconn, "asender terminated\n");

	return 0;
}