/*
 * Copyright (c) 2012-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS - Please see sys/dmsg.h for an
 * involved explanation of the protocol.
 */
#include "dmsg_local.h"
/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost:
 * every relay hop adds 1 to the advertised distance (see
 * dmsg_generate_relay()), so a span circulating in a loop is dropped
 * once its distance exceeds this limit.
 */
#define DMSG_SPAN_MAXDIST	16
/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own
 * outgoing LNK_SPAN transactions on each of our connections representing
 * the aggregated state.
 *
 * h2span_conn	  - list of iocom connections that wish to receive SPAN
 *		    propagation from other connections.  Might contain
 *		    a filter string.  Only iocoms with an open
 *		    LNK_CONN transaction are applicable for SPAN
 *		    propagation.
 *
 * h2span_relay	  - List of links relayed (via SPAN).  Essentially
 *		    each relay structure represents a LNK_SPAN
 *		    transaction that we initiated, versus h2span_link
 *		    which is a LNK_SPAN transaction that we received.
 *
 * h2span_cluster - Organizes the shared fsid's.  One structure for
 *		    each cluster.
 *
 * h2span_node	  - Organizes the nodes in a cluster.  One structure
 *		    for each unique {cluster,node}, aka {peer_id, pfs_id}.
 *
 * h2span_link	  - Organizes all incoming and outgoing LNK_SPAN message
 *		    transactions related to a node.
 *
 *		    One h2span_link structure for each incoming LNK_SPAN
 *		    transaction.  Links selected for propagation back
 *		    out are also where the outgoing LNK_SPAN messages
 *		    are indexed into (so we can propagate changes).
 *
 *		    The h2span_links are sorted in a red-black tree by
 *		    the distance hop metric of the incoming LNK_SPAN.
 *		    We then select the top N for outgoing.  When the
 *		    topology changes the top N may also change and cause
 *		    new outgoing LNK_SPAN transactions to be opened
 *		    and less desirable ones to be closed, causing
 *		    transactional aborts within the message flow in
 *		    progress.
 *
 * Also note	  - All outgoing LNK_SPAN message transactions are also
 *		    entered into a red-black tree for use by the routing
 *		    function.  This is handled by msg.c in the state
 *		    code.
 */
TAILQ_HEAD(h2span_conn_queue, h2span_conn);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);
/*
 * Received LNK_CONN transaction enables SPAN protocol over connection.
 * (may contain filter).  Typically one for each mount and several may
 * share the same media.
 */
struct h2span_conn {
	TAILQ_ENTRY(h2span_conn) entry;
	struct h2span_relay_tree tree;
	dmsg_state_t	*state;		/* open LNK_CONN transaction */
	dmsg_lnk_conn_t	lnk_conn;
};
/*
 * All received LNK_SPANs are organized by peer id (peer_id),
 * node (pfs_id), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
	RB_ENTRY(h2span_cluster) rbnode;
	struct h2span_node_tree tree;
	uuid_t	peer_id;		/* shared fsid */
	uint8_t	peer_type;
	uint8_t	reserved01[7];
	char	peer_label[128];	/* string identification */
	int	refs;			/* prevents destruction */
};
struct h2span_node {
	RB_ENTRY(h2span_node) rbnode;
	struct h2span_link_tree tree;
	struct h2span_cluster *cls;
	uint8_t	pfs_type;
	uint8_t	reserved01[7];
	uuid_t	pfs_id;			/* unique pfs id */
	char	pfs_label[128];		/* string identification */
};
struct h2span_link {
	RB_ENTRY(h2span_link) rbnode;
	dmsg_state_t	*state;		/* state<->link */
	struct h2span_node *node;	/* related node */
	struct h2span_relay_queue relayq; /* relay out */
	dmsg_lnk_span_t	lnk_span;
};
/*
 * Any LNK_SPAN transactions we receive which are relayed out to other
 * connections utilize this structure to track the LNK_SPAN transactions
 * we initiate (relay out) on other connections.  We only relay out
 * LNK_SPANs on connections we have an open CONN transaction for.
 *
 * The relay structure points to the outgoing LNK_SPAN transaction
 * (target_rt) and to the incoming LNK_SPAN transaction (source_rt).
 * The relay structure holds refs on the related states.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_conn (so we can look
 * up the spanid to validate it).
 */
struct h2span_relay {
	TAILQ_ENTRY(h2span_relay) entry;	/* from link */
	RB_ENTRY(h2span_relay) rbnode;		/* from h2span_conn */
	struct h2span_conn	*conn;		/* related CONN transaction */
	dmsg_state_t		*source_rt;	/* h2span_link state */
	dmsg_state_t		*target_rt;	/* h2span_relay state */
};
typedef struct h2span_conn h2span_conn_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;
#define dmsg_termstr(array)	_dmsg_termstr((array), sizeof(array))

static h2span_relay_t *dmsg_generate_relay(h2span_conn_t *conn,
					h2span_link_t *slink);
static uint32_t dmsg_rnss(void);

/*
 * Guarantee NUL termination of a fixed-size string buffer.
 */
static
void
_dmsg_termstr(char *base, size_t size)
{
	base[size-1] = 0;
}
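
/*
 * Usage sketch (illustrative only, not compiled): the macro form is
 * intended for fixed-size label arrays, where sizeof() captures the
 * array size automatically, e.g. the peer_label/pfs_label fields:
 */
#if 0
	dmsg_termstr(msg->any.lnk_span.peer_label);	/* array form */
	_dmsg_termstr(buf, bufsize);			/* explicit size */
#endif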
/*
 * Cluster peer_type, uuid, AND label must match for a match.
 */
static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
	int r;

	if (cls1->peer_type < cls2->peer_type)
		return(-1);
	if (cls1->peer_type > cls2->peer_type)
		return(1);
	r = uuid_compare(&cls1->peer_id, &cls2->peer_id, NULL);
	if (r == 0)
		r = strcmp(cls1->peer_label, cls2->peer_label);

	return (r);
}
/*
 * Match against pfs_label/pfs_id.  Together these two items represent a
 * unique node.  In most cases the primary differentiator is pfs_id but
 * we also string-match pfs_label.
 */
static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
	int r;

	r = strcmp(node1->pfs_label, node2->pfs_label);
	if (r == 0)
		r = uuid_compare(&node1->pfs_id, &node2->pfs_id, NULL);

	return (r);
}
/*
 * Sort/subsort must match h2span_relay_cmp() under any given node
 * to make the aggregation algorithm easier, so the best links are
 * in the same sorted order as the best relays.
 *
 * NOTE: We cannot use link*->state->msgid because this msgid is created
 *	 by each remote host and thus might wind up being the same.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
	if (link1->lnk_span.dist < link2->lnk_span.dist)
		return(-1);
	if (link1->lnk_span.dist > link2->lnk_span.dist)
		return(1);
	if (link1->lnk_span.rnss < link2->lnk_span.rnss)
		return(-1);
	if (link1->lnk_span.rnss > link2->lnk_span.rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}
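
/*
 * Illustration (not compiled): because h2span_link_cmp() sorts on
 * (dist, rnss, state address), an in-order walk of a node's link tree
 * visits the best (lowest-distance) received SPANs first.  The relay
 * aggregation code below depends on exactly this property:
 */
#if 0
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		/* first iterations see the lowest dist, then lowest rnss */
	}
#endif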
/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
	h2span_link_t *link1 = relay1->source_rt->any.link;
	h2span_link_t *link2 = relay2->source_rt->any.link;

	if ((intptr_t)link1->node < (intptr_t)link2->node)
		return(-1);
	if ((intptr_t)link1->node > (intptr_t)link2->node)
		return(1);
	if (link1->lnk_span.dist < link2->lnk_span.dist)
		return(-1);
	if (link1->lnk_span.dist > link2->lnk_span.dist)
		return(1);
	if (link1->lnk_span.rnss < link2->lnk_span.rnss)
		return(-1);
	if (link1->lnk_span.rnss > link2->lnk_span.rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}
RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);
/*
 * Global mutex protects cluster_tree lookups, connq, mediaq.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_conn_queue connq = TAILQ_HEAD_INITIALIZER(connq);
static struct dmsg_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);
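
/*
 * Sketch of the lock discipline (illustrative only): any traversal of
 * the topology must hold cluster_mtx for its entire duration, as in
 * dmsg_findspan() and dmsg_shell_tree() below:
 */
#if 0
	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			/* topology is stable while the mutex is held */
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
#endif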
static void dmsg_lnk_span(dmsg_msg_t *msg);
static void dmsg_lnk_conn(dmsg_msg_t *msg);
static void dmsg_lnk_ping(dmsg_msg_t *msg);
static void dmsg_lnk_relay(dmsg_msg_t *msg);
static void dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node);
static void dmsg_relay_delete(h2span_relay_t *relay);
void
dmsg_msg_lnk_signal(dmsg_iocom_t *iocom __unused)
{
	pthread_mutex_lock(&cluster_mtx);
	dmsg_relay_scan(NULL, NULL);
	pthread_mutex_unlock(&cluster_mtx);
}
/*
 * DMSG_PROTO_LNK - Generic DMSG_PROTO_LNK.
 *	(incoming iocom lock not held)
 *
 * This function is typically called for one-way and opening-transactions
 * since state->func is assigned after that, but it will also be called
 * if no state->func is assigned on transaction-open.
 */
void
dmsg_msg_lnk(dmsg_msg_t *msg)
{
	dmsg_iocom_t *iocom = msg->state->iocom;

	switch(msg->tcmd & DMSGF_BASECMDMASK) {
	case DMSG_LNK_CONN:
		dmsg_lnk_conn(msg);
		break;
	case DMSG_LNK_SPAN:
		dmsg_lnk_span(msg);
		break;
	case DMSG_LNK_PING:
		dmsg_lnk_ping(msg);
		break;
	default:
		iocom->usrmsg_callback(msg, 1);
		/* state invalid after reply */
		break;
	}
}
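
/*
 * Sketch of the dispatch flow (illustrative only): the opening message
 * of a transaction is routed through the switch above, and the case
 * handler then latches state->func (e.g. dmsg_lnk_conn), so subsequent
 * messages on the open transaction bypass this function.  Conceptually
 * the receive path does something like:
 */
#if 0
	if (state->func)
		state->func(msg);	/* established transaction */
	else
		dmsg_msg_lnk(msg);	/* first message, route by tcmd */
#endif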
/*
 * LNK_CONN - iocom identify message reception.
 *	(incoming iocom lock not held)
 *
 * Remote node identifies itself to us, sets up a SPAN filter, and gives us
 * the ok to start transmitting SPANs.
 */
static
void
dmsg_lnk_conn(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	dmsg_iocom_t *iocom = state->iocom;
	h2span_conn_t *conn;
	h2span_relay_t *relay;
	dmsg_media_t *media;
	char *alloc = NULL;

	pthread_mutex_lock(&cluster_mtx);

	dmio_printf(iocom, 3,
		    "dmsg_lnk_conn: msg %p cmd %08x state %p "
		    "txcmd %08x rxcmd %08x\n",
		    msg, msg->any.head.cmd, state,
		    state->txcmd, state->rxcmd);

	switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
	case DMSG_LNK_CONN | DMSGF_CREATE:
	case DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * On transaction start we allocate a new h2span_conn and
		 * acknowledge the request, leaving the transaction open.
		 * We then relay priority-selected SPANs.
		 */
		dmio_printf(iocom, 3, "LNK_CONN(%08x): %s/%s\n",
			    (uint32_t)msg->any.head.msgid,
			    dmsg_uuid_to_str(&msg->any.lnk_conn.peer_id,
					     &alloc),
			    msg->any.lnk_conn.peer_label);
		free(alloc);
		alloc = NULL;

		conn = dmsg_alloc(sizeof(*conn));
		assert(state->iocom->conn == NULL);

		RB_INIT(&conn->tree);
		state->iocom->conn = conn;	/* XXX only one */
		state->iocom->conn_msgid = state->msgid;
		dmsg_state_hold(state);
		conn->state = state;
		state->func = dmsg_lnk_conn;
		state->any.conn = conn;
		TAILQ_INSERT_TAIL(&connq, conn, entry);
		conn->lnk_conn = msg->any.lnk_conn;
		/*
		 * Set up media tracking for the connection.
		 */
		TAILQ_FOREACH(media, &mediaq, entry) {
			if (uuid_compare(&msg->any.lnk_conn.media_id,
					 &media->media_id, NULL) == 0) {
				break;
			}
		}
		if (media == NULL) {
			media = dmsg_alloc(sizeof(*media));
			media->media_id = msg->any.lnk_conn.media_id;
			TAILQ_INSERT_TAIL(&mediaq, media, entry);
		}
		state->media = media;
		++media->refs;

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
			iocom->usrmsg_callback(msg, 0);
			dmsg_msg_result(msg, 0);
			dmsg_iocom_signal(iocom);
			break;
		}
		/* FALL THROUGH */
	case DMSG_LNK_CONN | DMSGF_DELETE:
	case DMSG_LNK_ERROR | DMSGF_DELETE:
deleteconn:
		/*
		 * On transaction terminate we clean out our h2span_conn
		 * and acknowledge the request, closing the transaction.
		 */
		dmio_printf(iocom, 3, "%s\n", "LNK_CONN: Terminated");
		conn = state->any.conn;
		assert(conn);

		/*
		 * Callback will clean out media config / user-opaque state
		 */
		media = state->media;
		--media->refs;
		if (media->refs == 0) {
			dmio_printf(iocom, 3, "%s\n", "Media shutdown");
			TAILQ_REMOVE(&mediaq, media, entry);
			pthread_mutex_unlock(&cluster_mtx);
			iocom->usrmsg_callback(msg, 0);
			pthread_mutex_lock(&cluster_mtx);
			dmsg_free(media);
		}
		state->media = NULL;
		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = RB_ROOT(&conn->tree)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out conn
		 */
		conn->state = NULL;
		msg->state->any.conn = NULL;
		msg->state->iocom->conn = NULL;
		TAILQ_REMOVE(&connq, conn, entry);
		dmsg_free(conn);

		dmsg_msg_reply(msg, 0);
		dmsg_state_drop(state);
		/* state invalid after reply */
		break;
	default:
		iocom->usrmsg_callback(msg, 1);
		if (msg->any.head.cmd & DMSGF_DELETE)
			goto deleteconn;
		dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		break;
	}
	pthread_mutex_unlock(&cluster_mtx);
}
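
/*
 * For reference, a hypothetical peer-side sketch (not part of this
 * file): the remote end opens the transaction handled above by sending
 * DMSG_LNK_CONN | DMSGF_CREATE with its identity filled in.  The
 * my_peer_id/my_media_id/my_label variables are assumptions:
 */
#if 0
	msg = dmsg_msg_alloc(&iocom->state0, 0,
			     DMSG_LNK_CONN | DMSGF_CREATE,
			     NULL, NULL);
	msg->any.lnk_conn.peer_id = my_peer_id;
	msg->any.lnk_conn.media_id = my_media_id;
	snprintf(msg->any.lnk_conn.peer_label,
		 sizeof(msg->any.lnk_conn.peer_label), "%s", my_label);
	dmsg_msg_write(msg);
#endif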
/*
 * LNK_SPAN - Spanning tree protocol message reception
 *	(incoming iocom lock not held)
 *
 * Receive a spanning tree transactional message, creating or destroying
 * a SPAN and propagating it to other iocoms.
 */
static
void
dmsg_lnk_span(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	dmsg_iocom_t *iocom = state->iocom;
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *alloc = NULL;

	/*
	 * Ignore reply to LNK_SPAN.  The reply is expected and allows
	 * commands to flow in both directions on the open transaction.
	 * This will also ignore DMSGF_REPLY|DMSGF_DELETE messages.
	 * Since we take no action if the other end unexpectedly closes
	 * their side of the transaction, we can ignore that too.
	 */
	if (msg->any.head.cmd & DMSGF_REPLY) {
		dmio_printf(iocom, 2, "%s\n",
			    "Ignore reply to LNK_SPAN");
		return;
	}

	pthread_mutex_lock(&cluster_mtx);
	/*
	 * On transaction start we initialize the tracking infrastructure
	 */
	if (msg->any.head.cmd & DMSGF_CREATE) {
		assert(state->func == NULL);
		state->func = dmsg_lnk_span;

		dmsg_termstr(msg->any.lnk_span.peer_label);
		dmsg_termstr(msg->any.lnk_span.pfs_label);

		/*
		 * Find the cluster
		 */
		dummy_cls.peer_id = msg->any.lnk_span.peer_id;
		dummy_cls.peer_type = msg->any.lnk_span.peer_type;
		bcopy(msg->any.lnk_span.peer_label, dummy_cls.peer_label,
		      sizeof(dummy_cls.peer_label));
		cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
		if (cls == NULL) {
			cls = dmsg_alloc(sizeof(*cls));
			cls->peer_id = msg->any.lnk_span.peer_id;
			cls->peer_type = msg->any.lnk_span.peer_type;
			bcopy(msg->any.lnk_span.peer_label,
			      cls->peer_label, sizeof(cls->peer_label));
			RB_INIT(&cls->tree);
			RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
		}
		/*
		 * Find the node
		 */
		dummy_node.pfs_id = msg->any.lnk_span.pfs_id;
		bcopy(msg->any.lnk_span.pfs_label, dummy_node.pfs_label,
		      sizeof(dummy_node.pfs_label));
		node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
		if (node == NULL) {
			node = dmsg_alloc(sizeof(*node));
			node->pfs_id = msg->any.lnk_span.pfs_id;
			node->pfs_type = msg->any.lnk_span.pfs_type;
			bcopy(msg->any.lnk_span.pfs_label, node->pfs_label,
			      sizeof(node->pfs_label));
			node->cls = cls;
			RB_INIT(&node->tree);
			RB_INSERT(h2span_node_tree, &cls->tree, node);
		}
		/*
		 * Create the link.
		 *
		 * NOTE: Sub-transactions on the incoming SPAN can be used
		 *	 to talk to the originator.  We should not set-up
		 *	 state->relay for incoming SPANs since our sub-trans
		 *	 is running on the same interface (i.e. no actual
		 *	 relaying need be done).
		 *
		 * NOTE: Later on when we relay the SPAN out, the outgoing
		 *	 SPAN state will be set up to relay back to this
		 *	 state.
		 *
		 * NOTE: It is possible for SPAN targets to send one-way
		 *	 messages to the originator but it is not possible
		 *	 for the originator to (currently) broadcast one-way
		 *	 messages to all of its SPAN targets.  The protocol
		 *	 allows such a feature to be added in the future.
		 */
		assert(state->any.link == NULL);
		dmsg_state_hold(state);
		slink = dmsg_alloc(sizeof(*slink));
		TAILQ_INIT(&slink->relayq);
		slink->node = node;
		slink->state = state;
		state->any.link = slink;
		slink->lnk_span = msg->any.lnk_span;

		RB_INSERT(h2span_link_tree, &node->tree, slink);
		dmio_printf(iocom, 3,
			    "LNK_SPAN(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			    iocom, slink,
			    dmsg_uuid_to_str(&msg->any.lnk_span.peer_id,
					     &alloc),
			    msg->any.lnk_span.peer_label,
			    msg->any.lnk_span.pfs_label,
			    msg->any.lnk_span.dist);
		free(alloc);

		dmsg_relay_scan(NULL, node);

		/*
		 * Ack the open, which will issue a CREATE on our side, and
		 * leave the transaction open.  Necessary to allow the
		 * transaction to be used as a virtual circuit.
		 */
		dmsg_state_result(state, 0);
		dmsg_iocom_signal(iocom);
	}
	/*
	 * On transaction terminate we remove the tracking infrastructure.
	 */
	if (msg->any.head.cmd & DMSGF_DELETE) {
		slink = state->any.link;
		assert(slink != NULL);
		assert(slink->state == state);
		node = slink->node;
		cls = node->cls;

		dmio_printf(iocom, 3,
			    "LNK_DELE(thr %p): %p %s cl=%s fs=%s\n",
			    iocom, slink,
			    dmsg_uuid_to_str(&cls->peer_id, &alloc),
			    cls->peer_label, node->pfs_label);
		free(alloc);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
			dmsg_relay_delete(relay);
		}
		/*
		 * Clean out the topology
		 */
		RB_REMOVE(h2span_link_tree, &node->tree, slink);
		if (RB_EMPTY(&node->tree)) {
			RB_REMOVE(h2span_node_tree, &cls->tree, node);
			if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
				RB_REMOVE(h2span_cluster_tree,
					  &cluster_tree, cls);
				dmsg_free(cls);
			}
			dmsg_free(node);
			node = NULL;
		}
		state->any.link = NULL;
		slink->state = NULL;
		slink->node = NULL;
		dmsg_state_drop(state);
		dmsg_free(slink);

		/*
		 * We have to terminate the transaction
		 */
		dmsg_state_reply(state, 0);
		/* state invalid after reply */
		/*
		 * If the node still exists issue any required updates.  If
		 * it doesn't then all related relays have already been
		 * removed and there's nothing left to do.
		 */
		if (node) {
			dmsg_relay_scan(NULL, node);
			dmsg_iocom_signal(iocom);
		}
	}

	pthread_mutex_unlock(&cluster_mtx);
}
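
/*
 * For reference, a hypothetical originator-side sketch (not part of
 * this file): a node advertises itself by opening a LNK_SPAN
 * transaction; relays then propagate it with dist bumped by 1 (see
 * dmsg_generate_relay()).  The my_* variables and the initial dist
 * value are assumptions:
 */
#if 0
	msg = dmsg_msg_alloc(&iocom->state0, 0,
			     DMSG_LNK_SPAN | DMSGF_CREATE,
			     NULL, NULL);
	msg->any.lnk_span.peer_id = my_peer_id;
	msg->any.lnk_span.pfs_id = my_pfs_id;
	msg->any.lnk_span.dist = 1;
	dmsg_msg_write(msg);
#endif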
/*
 * Respond to a PING with a PING|REPLY, forward replies to the usermsg
 * callback.
 */
static
void
dmsg_lnk_ping(dmsg_msg_t *msg)
{
	dmsg_msg_t *rep;

	if (msg->any.head.cmd & DMSGF_REPLY) {
		msg->state->iocom->usrmsg_callback(msg, 1);
	} else {
		rep = dmsg_msg_alloc(msg->state, 0,
				     DMSG_LNK_PING | DMSGF_REPLY,
				     NULL, NULL);
		dmsg_msg_write(rep);
	}
}
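
/*
 * For reference, a hypothetical sender-side sketch: the one-way PING
 * answered above would be generated roughly as follows; the PING|REPLY
 * then arrives back at the sender's usrmsg_callback:
 */
#if 0
	msg = dmsg_msg_alloc(state, 0, DMSG_LNK_PING, NULL, NULL);
	dmsg_msg_write(msg);
#endif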
/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void dmsg_relay_scan_specific(h2span_node_t *node,
				h2span_conn_t *conn);
static
void
dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node)
{
	h2span_cluster_t *cls;

	if (node) {
		/*
		 * Iterate specific node
		 */
		TAILQ_FOREACH(conn, &connq, entry)
			dmsg_relay_scan_specific(node, conn);
	} else {
		/*
		 * Iterate cluster ids, nodes, and either a specific
		 * connection or all connections.
		 */
		RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
			/*
			 * Iterate node ids
			 */
			RB_FOREACH(node, h2span_node_tree, &cls->tree) {
				/*
				 * Synchronize the node's link (received
				 * SPANs) with each connection's relays.
				 */
				if (conn) {
					dmsg_relay_scan_specific(node, conn);
				} else {
					TAILQ_FOREACH(conn, &connq, entry) {
						dmsg_relay_scan_specific(node,
									 conn);
					}
					assert(conn == NULL);
				}
			}
		}
	}
}
/*
 * Update the relayed SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We only propagate the top link
 * for now (XXX we want to propagate the top two).
 *
 * The dmsg_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
	h2span_node_t *node;
	h2span_relay_t *relay;
};
static int
dmsg_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	if ((intptr_t)relay->source_rt->any.link->node < (intptr_t)info->node)
		return(-1);
	if ((intptr_t)relay->source_rt->any.link->node > (intptr_t)info->node)
		return(1);
	return(0);
}

static int
dmsg_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	/*
	 * Record the first matching relay and abort the scan.
	 */
	info->relay = relay;
	return(-1);
}
static
void
dmsg_relay_scan_specific(h2span_node_t *node, h2span_conn_t *conn)
{
	struct relay_scan_info info;
	h2span_relay_t *relay;
	h2span_relay_t *next_relay;
	h2span_link_t *slink;
	dmsg_lnk_conn_t *lconn;
	dmsg_lnk_span_t *lspan;
	int count;
	int maxcount = 2;
#ifdef REQUIRE_SYMMETRICAL
	uint32_t lastdist = DMSG_SPAN_MAXDIST;
	uint32_t lastrnss = 0;
#endif

	info.node = node;
	info.relay = NULL;

	/*
	 * Locate the first related relay for the node on this connection.
	 * relay will be NULL if there were none.
	 */
	RB_SCAN(h2span_relay_tree, &conn->tree,
		dmsg_relay_scan_cmp, dmsg_relay_scan_callback, &info);
	relay = info.relay;
	info.relay = NULL;
	if (relay)
		assert(relay->source_rt->any.link->node == node);

	dm_printf(9, "relay scan for connection %p\n", conn);
	/*
	 * Iterate the node's links (received SPANs) in distance order,
	 * lowest (best) dist first.
	 *
	 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
	 *
	 * Track relays while iterating the best links and construct
	 * missing relays when necessary.
	 *
	 * (If some prior better link was removed it would have also
	 *  removed the relay, so the relay can only match exactly or
	 *  be worse.)
	 */
	count = 0;
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		/*
		 * Increment count of successful relays.  This isn't
		 * quite accurate if we break out but nothing after
		 * the loop uses (count).
		 *
		 * If count exceeds the maximum number of relays we desire
		 * we normally want to break out.  However, in order to
		 * guarantee a symmetric path we have to continue if both
		 * (dist) and (rnss) continue to match.  Otherwise the SPAN
		 * propagation in the reverse direction may choose different
		 * routes and we will not have a symmetric path.
		 *
		 * NOTE: Spanning tree does not have to be symmetrical so
		 *	 this code is not currently enabled.
		 */
		if (++count >= maxcount) {
#ifdef REQUIRE_SYMMETRICAL
			if (lastdist != slink->lnk_span.dist ||
			    lastrnss != slink->lnk_span.rnss) {
				break;
			}
#else
			break;
#endif
			/* go beyond the nominal maximum desired relays */
		}
		/*
		 * Match, relay already in-place, get the next
		 * relay to match against the next slink.
		 */
		if (relay && relay->source_rt->any.link == slink) {
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
			continue;
		}
		/*
		 * We might want this SLINK, if it passes our filters.
		 *
		 * The spanning tree can cause closed loops so we have
		 * to limit slink->dist.
		 */
		if (slink->lnk_span.dist > DMSG_SPAN_MAXDIST)
			break;

		/*
		 * Don't bother transmitting a LNK_SPAN out the same
		 * connection it came in on.  Trivial optimization.
		 */
		if (slink->state->iocom == conn->state->iocom)
			break;
		/*
		 * NOTE ON FILTERS: The protocol spec allows non-requested
		 * SPANs to be transmitted, the other end is expected to
		 * leave their transactions open but otherwise ignore them.
		 *
		 * Don't bother transmitting if the remote connection
		 * is not accepting this SPAN's peer_type.
		 */
		lspan = &slink->lnk_span;
		lconn = &conn->lnk_conn;
		if (((1LLU << lspan->peer_type) & lconn->peer_mask) == 0)
			break;
		/*
		 * Do not give pure clients visibility to other pure clients
		 */
		if (lconn->peer_type == DMSG_PEER_CLIENT &&
		    lspan->peer_type == DMSG_PEER_CLIENT) {
			break;
		}

		/*
		 * Clients can set peer_id to filter the peer_id of incoming
		 * spans.  Other peer types set peer_id to advertise their
		 * own id.
		 *
		 * NOTE: peer_label is not a filter on clients, it identifies
		 *	 the client just as it identifies other peer types.
		 */
		if (lconn->peer_type == DMSG_PEER_CLIENT &&
		    !uuid_is_nil(&lconn->peer_id, NULL) &&
		    uuid_compare(&slink->node->cls->peer_id,
				 &lconn->peer_id, NULL)) {
			break;
		}

		/*
		 * NOTE! pfs_id differentiates nodes within the same cluster
		 *	 so we obviously don't want to match those.  Similarly
		 *	 for pfs_label.
		 */
		/*
		 * Ok, we've accepted this SPAN for relaying.
		 */
		assert(relay == NULL ||
		       relay->source_rt->any.link->node != slink->node ||
		       relay->source_rt->any.link->lnk_span.dist >=
			slink->lnk_span.dist);
		relay = dmsg_generate_relay(conn, slink);
#ifdef REQUIRE_SYMMETRICAL
		lastdist = slink->lnk_span.dist;
		lastrnss = slink->lnk_span.rnss;
#endif

		/*
		 * Match (created new relay), get the next relay to
		 * match against the next slink.
		 */
		relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
	}
	/*
	 * Any remaining relays belonging to this connection which match
	 * the node are in excess of the current aggregate spanning state
	 * and should be removed.
	 */
	while (relay && relay->source_rt->any.link->node == node) {
		next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		dm_printf(9, "%s\n", "RELAY DELETE FROM EXTRAS");
		dmsg_relay_delete(relay);
		relay = next_relay;
	}
}
/*
 * Find the slink associated with the msgid and return its state,
 * so the caller can issue a transaction.
 */
dmsg_state_t *
dmsg_findspan(const char *label)
{
	dmsg_state_t *state;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	uint64_t msgid = strtoull(label, NULL, 16);

	pthread_mutex_lock(&cluster_mtx);

	state = NULL;
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				if (slink->state->msgid == msgid) {
					state = slink->state;
					goto done;
				}
			}
		}
	}
done:
	pthread_mutex_unlock(&cluster_mtx);

	dm_printf(8, "findspan: %p\n", state);

	return (state);
}
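
/*
 * Usage sketch (illustrative only): a caller resolves a span by its
 * hex msgid label and opens a sub-transaction on the returned state to
 * talk to the span's originator.  The label value and command are
 * placeholders:
 */
#if 0
	state = dmsg_findspan("16c60100aabbccdd");
	if (state) {
		msg = dmsg_msg_alloc(state, 0,
				     some_cmd | DMSGF_CREATE,
				     NULL, NULL);
		dmsg_msg_write(msg);
	}
#endif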
/*
 * Helper function to generate missing relay on target connection.
 *
 * cluster_mtx must be held
 */
static
h2span_relay_t *
dmsg_generate_relay(h2span_conn_t *conn, h2span_link_t *slink)
{
	h2span_relay_t *relay;
	dmsg_msg_t *msg;

	dmsg_state_hold(slink->state);

	relay = dmsg_alloc(sizeof(*relay));
	relay->conn = conn;
	relay->source_rt = slink->state;
	/* relay->source_rt->any.link = slink; */
	/*
	 * NOTE: relay->target_rt->any.relay is set to relay by alloc.
	 *
	 * NOTE: LNK_SPAN is transmitted as a top-level transaction.
	 */
	msg = dmsg_msg_alloc(&conn->state->iocom->state0,
			     0, DMSG_LNK_SPAN | DMSGF_CREATE,
			     dmsg_lnk_relay, relay);
	dmsg_state_hold(msg->state);
	relay->target_rt = msg->state;

	msg->any.lnk_span = slink->lnk_span;
	msg->any.lnk_span.dist = slink->lnk_span.dist + 1;
	msg->any.lnk_span.rnss = slink->lnk_span.rnss + dmsg_rnss();

	RB_INSERT(h2span_relay_tree, &conn->tree, relay);
	TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

	/*
	 * Seed the relay so new sub-transactions received on the outgoing
	 * SPAN circuit are relayed back to the originator.
	 */
	msg->state->relay = relay->source_rt;
	dmsg_state_hold(msg->state->relay);

	dmsg_msg_write(msg);

	return (relay);
}
/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static
void
dmsg_lnk_relay(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_relay_t *relay;

	assert(msg->any.head.cmd & DMSGF_REPLY);

	if (msg->any.head.cmd & DMSGF_DELETE) {
		pthread_mutex_lock(&cluster_mtx);
		dm_printf(8, "%s\n", "RELAY DELETE FROM LNK_RELAY MSG");
		if ((relay = state->any.relay) != NULL) {
			dmsg_relay_delete(relay);
		} else {
			dmsg_state_reply(state, 0);
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
}
/*
 * Delete the relay.
 *
 * cluster_mtx held by caller
 */
static
void
dmsg_relay_delete(h2span_relay_t *relay)
{
	dm_printf(8,
		  "RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p "
		  "DIST=%d FD %d STATE %p\n",
		  relay->source_rt->any.link,
		  relay,
		  relay->source_rt->any.link->node->cls,
		  relay->source_rt->any.link->node,
		  relay->source_rt->any.link->lnk_span.dist,
		  relay->conn->state->iocom->sock_fd,
		  relay->target_rt);

	RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
	TAILQ_REMOVE(&relay->source_rt->any.link->relayq, relay, entry);

	if (relay->target_rt) {
		relay->target_rt->any.relay = NULL;
		dmsg_state_reply(relay->target_rt, 0);
		dmsg_state_drop(relay->target_rt);
		/* state invalid after reply */
		relay->target_rt = NULL;
	}
	/*
	 * NOTE: relay->source_rt->refs is held by the relay SPAN
	 *	 state, not by this relay structure.
	 */
	relay->conn = NULL;
	if (relay->source_rt) {
		dmsg_state_drop(relay->source_rt);
		relay->source_rt = NULL;
	}
	dmsg_free(relay);
}
/************************************************************************
 *			ROUTER AND MESSAGING HANDLES			*
 ************************************************************************
 *
 * Basically the idea here is to provide a stable data structure which
 * can be localized to the caller for higher level protocols to work with.
 * Depending on the context, these dmsg_handles can be pooled by use-case
 * and remain persistent through a client's (or mount point's) life.
 */
/*
 * Obtain a stable handle on a cluster given its uuid.  This ties directly
 * into the global cluster topology, creating the structure if necessary
 * (even if the uuid does not exist or does not exist yet), and preventing
 * the structure from getting ripped out from under us while we hold a
 * pointer to it.
 */
h2span_cluster_t *
dmsg_cluster_get(uuid_t *peer_id)
{
	h2span_cluster_t dummy_cls;
	h2span_cluster_t *cls;

	dummy_cls.peer_id = *peer_id;
	pthread_mutex_lock(&cluster_mtx);
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls)
		++cls->refs;
	pthread_mutex_unlock(&cluster_mtx);

	return (cls);
}
void
dmsg_cluster_put(h2span_cluster_t *cls)
{
	pthread_mutex_lock(&cluster_mtx);
	assert(cls->refs > 0);
	--cls->refs;
	if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
		RB_REMOVE(h2span_cluster_tree,
			  &cluster_tree, cls);
		dmsg_free(cls);
	}
	pthread_mutex_unlock(&cluster_mtx);
}
/*
 * Obtain a stable handle to a specific cluster node given its uuid.
 * This handle does NOT lock in the route to the node and is typically
 * used as part of the dmsg_handle_*() API to obtain a set of
 * routes to operate on.
 */
h2span_node_t *
dmsg_node_get(h2span_cluster_t *cls __unused, uuid_t *pfs_id __unused)
{
	/* XXX not implemented */
	return (NULL);
}
/*
 * Dumps the spanning tree.
 */
void
dmsg_shell_tree(dmsg_iocom_t *iocom, char *cmdbuf __unused)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *uustr = NULL;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		dmsg_printf(iocom, "Cluster %s %s (%s)\n",
			    dmsg_peer_type_to_str(cls->peer_type),
			    dmsg_uuid_to_str(&cls->peer_id, &uustr),
			    cls->peer_label);
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			dmsg_printf(iocom, "    Node %02x %s (%s)\n",
				    node->pfs_type,
				    dmsg_uuid_to_str(&node->pfs_id, &uustr),
				    node->pfs_label);
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				dmsg_printf(iocom,
					    "\tSLink msgid %016jx "
					    "dist=%d via %d\n",
					    (intmax_t)slink->state->msgid,
					    slink->lnk_span.dist,
					    slink->state->iocom->sock_fd);
				TAILQ_FOREACH(relay, &slink->relayq, entry) {
					dmsg_printf(iocom,
					    "\t    Relay-out msgid %016jx "
					    "via %d\n",
					    (intmax_t)relay->target_rt->msgid,
					    relay->target_rt->iocom->sock_fd);
				}
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);

#if 0
	TAILQ_FOREACH(conn, &connq, entry) {
		/* XXX dump h2span_conn state */
	}
#endif
}
/*
 * Locate the state representing an incoming LNK_SPAN given its msgid.
 */
int
dmsg_debug_findspan(uint64_t msgid, dmsg_state_t **statep)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				if (slink->state->msgid == msgid) {
					*statep = slink->state;
					goto found;
				}
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	*statep = NULL;

	return (ENOENT);
found:
	pthread_mutex_unlock(&cluster_mtx);

	return (0);
}
/*
 * Random number sub-sort value to add to SPAN rnss fields on relay.
 * This allows us to differentiate spans with the same <dist> field
 * for relaying purposes.  We must normally limit the number of relays
 * for any given SPAN origination but we must also guarantee that a
 * symmetric reverse path exists, so we use the rnss field as a sub-sort
 * (since there can be thousands or millions if we only match on <dist>),
 * and if there are STILL too many spans we go past the limit.
 */
static uint32_t DMsgRNSS;

static
uint32_t
dmsg_rnss(void)
{
	if (DMsgRNSS == 0) {
		pthread_mutex_lock(&cluster_mtx);
		while (DMsgRNSS == 0) {
			srandomdev();
			DMsgRNSS = random();
		}
		pthread_mutex_unlock(&cluster_mtx);
	}

	return (DMsgRNSS);
}