/* lib/libdmsg/msg_lnk.c */
/*
 * Copyright (c) 2012-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS - Please see sys/dmsg.h for an
 * involved explanation of the protocol.
 */

#include "dmsg_local.h"
/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define DMSG_SPAN_MAXDIST	16
/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own
 * outgoing LNK_SPAN transactions on each of our connections representing
 * the aggregated state.
 *
 * h2span_conn		- list of iocom connections who wish to receive SPAN
 *			  propagation from other connections.  Might contain
 *			  a filter string.  Only iocoms with an open
 *			  LNK_CONN transaction are applicable for SPAN
 *			  propagation.
 *
 * h2span_relay		- List of links relayed (via SPAN).  Essentially
 *			  each relay structure represents a LNK_SPAN
 *			  transaction that we initiated, versus h2span_link
 *			  which is a LNK_SPAN transaction that we received.
 *
 * --
 *
 * h2span_cluster	- Organizes the shared fsid's.  One structure for
 *			  each cluster.
 *
 * h2span_node		- Organizes the nodes in a cluster.  One structure
 *			  for each unique {cluster,node}, aka {peer_id, pfs_id}.
 *
 * h2span_link		- Organizes all incoming and outgoing LNK_SPAN message
 *			  transactions related to a node.
 *
 *			  One h2span_link structure for each incoming LNK_SPAN
 *			  transaction.  Links selected for propagation back
 *			  out are also where the outgoing LNK_SPAN messages
 *			  are indexed into (so we can propagate changes).
 *
 *			  The h2span_link's use a red-black tree to sort the
 *			  distance hop metric for the incoming LNK_SPAN.  We
 *			  then select the top N for outgoing.  When the
 *			  topology changes the top N may also change and cause
 *			  new outgoing LNK_SPAN transactions to be opened
 *			  and less desirable ones to be closed, causing
 *			  transactional aborts within the message flow in
 *			  the process.
 *
 * Also note		- All outgoing LNK_SPAN message transactions are also
 *			  entered into a red-black tree for use by the routing
 *			  function.  This is handled by msg.c in the state
 *			  code, not here.
 */
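/*
 * Summary sketch of the tracking topology built below, with the key
 * fields each level's _cmp function sorts on:
 *
 *	cluster_tree (global)
 *	  h2span_cluster    keyed by {peer_type, peer_id, peer_label}
 *	    h2span_node     keyed by {pfs_label, pfs_id}
 *	      h2span_link   one per received LNK_SPAN, keyed by
 *			    {dist, rnss, state address}
 *	        h2span_relay  one per outgoing LNK_SPAN we initiate,
 *			      also indexed in the h2span_conn's tree
 */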
struct h2span_link;
struct h2span_relay;
TAILQ_HEAD(h2span_conn_queue, h2span_conn);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);

uint32_t DMsgRNSS;
/*
 * Received LNK_CONN transaction enables SPAN protocol over connection.
 * (may contain filter).  Typically one for each mount and several may
 * share the same media.
 */
struct h2span_conn {
	TAILQ_ENTRY(h2span_conn) entry;
	struct h2span_relay_tree tree;
	dmsg_state_t *state;
	dmsg_lnk_conn_t lnk_conn;
};
/*
 * All received LNK_SPANs are organized by peer id (peer_id),
 * node (pfs_id), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
	RB_ENTRY(h2span_cluster) rbnode;
	struct h2span_node_tree tree;
	uuid_t	peer_id;		/* shared fsid */
	uint8_t	peer_type;
	uint8_t	reserved01[7];
	char	peer_label[128];	/* string identification */
	int	refs;			/* prevents destruction */
};
struct h2span_node {
	RB_ENTRY(h2span_node) rbnode;
	struct h2span_link_tree tree;
	struct h2span_cluster *cls;
	uint8_t	pfs_type;
	uint8_t	reserved01[7];
	uuid_t	pfs_id;			/* unique pfs id */
	char	pfs_label[128];		/* string identification */
	void	*opaque;
};
struct h2span_link {
	RB_ENTRY(h2span_link) rbnode;
	dmsg_state_t	*state;		/* state<->link */
	struct h2span_node *node;	/* related node */
	struct h2span_relay_queue relayq; /* relay out */
	dmsg_lnk_span_t lnk_span;
};
/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transactions
 * we initiate (relay out) on other connections.  We only relay out
 * LNK_SPANs on connections we have an open CONN transaction for.
 *
 * The relay structure points to the outgoing LNK_SPAN transaction
 * (target_rt) and to the incoming LNK_SPAN transaction (source_rt).
 * The relay structure holds refs on the related states.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_conn (so we can look
 * up the spanid to validate it).
 */
struct h2span_relay {
	TAILQ_ENTRY(h2span_relay) entry;	/* from link */
	RB_ENTRY(h2span_relay) rbnode;		/* from h2span_conn */
	struct h2span_conn	*conn;		/* related CONN transaction */
	dmsg_state_t		*source_rt;	/* h2span_link state */
	dmsg_state_t		*target_rt;	/* h2span_relay state */
};
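/*
 * Relay wiring sketch:
 *
 *	received LNK_SPAN		 initiated LNK_SPAN
 *	(h2span_link, source_rt)  -->  h2span_relay  -->  (target_rt)
 *
 * The relay holds a ref on both states; see dmsg_generate_relay()
 * and dmsg_relay_delete() below.
 */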
typedef struct h2span_conn h2span_conn_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

#define dmsg_termstr(array)	_dmsg_termstr((array), sizeof(array))

static h2span_relay_t *dmsg_generate_relay(h2span_conn_t *conn,
					h2span_link_t *slink);
static uint32_t dmsg_rnss(void);
static __inline
void
_dmsg_termstr(char *base, size_t size)
{
	base[size-1] = 0;
}
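/*
 * Usage sketch: any fixed-size label received off the wire is
 * terminated before being used as a C string, e.g. in dmsg_lnk_span()
 * below:
 *
 *	dmsg_termstr(msg->any.lnk_span.peer_label);
 *	dmsg_termstr(msg->any.lnk_span.pfs_label);
 */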
/*
 * Cluster peer_type, uuid, AND label must match for a match
 */
static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
	int r;

	if (cls1->peer_type < cls2->peer_type)
		return(-1);
	if (cls1->peer_type > cls2->peer_type)
		return(1);
	r = uuid_compare(&cls1->peer_id, &cls2->peer_id, NULL);
	if (r == 0)
		r = strcmp(cls1->peer_label, cls2->peer_label);

	return r;
}
/*
 * Match against pfs_label/pfs_id.  Together these two items represent a
 * unique node.  In most cases the primary differentiator is pfs_id but
 * we also string-match pfs_label.
 */
static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
	int r;

	r = strcmp(node1->pfs_label, node2->pfs_label);
	if (r == 0)
		r = uuid_compare(&node1->pfs_id, &node2->pfs_id, NULL);
	return (r);
}
/*
 * Sort/subsort must match h2span_relay_cmp() under any given node
 * to make the aggregation algorithm easier, so the best links are
 * in the same sorted order as the best relays.
 *
 * NOTE: We cannot use link*->state->msgid because this msgid is created
 *	 by each remote host and thus might wind up being the same.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
	if (link1->lnk_span.dist < link2->lnk_span.dist)
		return(-1);
	if (link1->lnk_span.dist > link2->lnk_span.dist)
		return(1);
	if (link1->lnk_span.rnss < link2->lnk_span.rnss)
		return(-1);
	if (link1->lnk_span.rnss > link2->lnk_span.rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}
/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
	h2span_link_t *link1 = relay1->source_rt->any.link;
	h2span_link_t *link2 = relay2->source_rt->any.link;

	if ((intptr_t)link1->node < (intptr_t)link2->node)
		return(-1);
	if ((intptr_t)link1->node > (intptr_t)link2->node)
		return(1);
	if (link1->lnk_span.dist < link2->lnk_span.dist)
		return(-1);
	if (link1->lnk_span.dist > link2->lnk_span.dist)
		return(1);
	if (link1->lnk_span.rnss < link2->lnk_span.rnss)
		return(-1);
	if (link1->lnk_span.rnss > link2->lnk_span.rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}
RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);
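#if 0
/*
 * Usage sketch (illustrative only, mirrors dmsg_lnk_span() below):
 * lookups key off a stack dummy populated with just the fields the
 * _cmp functions above compare.
 */
h2span_cluster_t dummy_cls;
h2span_cluster_t *cls;

dummy_cls.peer_id = msg->any.lnk_span.peer_id;
dummy_cls.peer_type = msg->any.lnk_span.peer_type;
bcopy(msg->any.lnk_span.peer_label, dummy_cls.peer_label,
      sizeof(dummy_cls.peer_label));
cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
#endif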
/*
 * Global mutex protects cluster_tree lookups, connq, mediaq.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_conn_queue connq = TAILQ_HEAD_INITIALIZER(connq);
static struct dmsg_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);
static void dmsg_lnk_span(dmsg_msg_t *msg);
static void dmsg_lnk_conn(dmsg_msg_t *msg);
static void dmsg_lnk_ping(dmsg_msg_t *msg);
static void dmsg_lnk_relay(dmsg_msg_t *msg);
static void dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node);
static void dmsg_relay_delete(h2span_relay_t *relay);
void
dmsg_msg_lnk_signal(dmsg_iocom_t *iocom __unused)
{
	pthread_mutex_lock(&cluster_mtx);
	dmsg_relay_scan(NULL, NULL);
	pthread_mutex_unlock(&cluster_mtx);
}
/*
 * DMSG_PROTO_LNK - Generic DMSG_PROTO_LNK dispatch.
 *	      (incoming iocom lock not held)
 *
 * This function is typically called for one-way and opening-transactions
 * since state->func is assigned after that, but it will also be called
 * if no state->func is assigned on transaction-open.
 */
void
dmsg_msg_lnk(dmsg_msg_t *msg)
{
	dmsg_iocom_t *iocom = msg->state->iocom;

	switch(msg->tcmd & DMSGF_BASECMDMASK) {
	case DMSG_LNK_CONN:
		dmsg_lnk_conn(msg);
		break;
	case DMSG_LNK_SPAN:
		dmsg_lnk_span(msg);
		break;
	case DMSG_LNK_PING:
		dmsg_lnk_ping(msg);
		break;
	default:
		iocom->usrmsg_callback(msg, 1);
		/* state invalid after reply */
		break;
	}
}
/*
 * LNK_CONN - iocom identify message reception.
 *	      (incoming iocom lock not held)
 *
 * Remote node identifies itself to us, sets up a SPAN filter, and gives us
 * the ok to start transmitting SPANs.
 */
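/*
 * Expected message flow (sketch, inferred from the handling below):
 *
 *	remote -> us : LNK_CONN | DMSGF_CREATE	(identify, SPAN filter)
 *	us -> remote : result 0			(transaction left open)
 *	us -> remote : LNK_SPAN | DMSGF_CREATE	(relayed spans, repeated)
 *	remote -> us : LNK_CONN | DMSGF_DELETE	(teardown)
 *	us -> remote : reply 0			(transaction closed)
 */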
void
dmsg_lnk_conn(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	dmsg_iocom_t *iocom = state->iocom;
	dmsg_media_t *media;
	h2span_conn_t *conn;
	h2span_relay_t *relay;
	char *alloc = NULL;

	pthread_mutex_lock(&cluster_mtx);

	dmio_printf(iocom, 3,
		    "dmsg_lnk_conn: msg %p cmd %08x state %p "
		    "txcmd %08x rxcmd %08x\n",
		    msg, msg->any.head.cmd, state,
		    state->txcmd, state->rxcmd);

	switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
	case DMSG_LNK_CONN | DMSGF_CREATE:
	case DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * On transaction start we allocate a new h2span_conn and
		 * acknowledge the request, leaving the transaction open.
		 * We then relay priority-selected SPANs.
		 */
		dmio_printf(iocom, 3, "LNK_CONN(%08x): %s/%s\n",
			    (uint32_t)msg->any.head.msgid,
			    dmsg_uuid_to_str(&msg->any.lnk_conn.peer_id,
					     &alloc),
			    msg->any.lnk_conn.peer_label);
		free(alloc);

		conn = dmsg_alloc(sizeof(*conn));
		assert(state->iocom->conn == NULL);

		RB_INIT(&conn->tree);
		state->iocom->conn = conn;	/* XXX only one */
		state->iocom->conn_msgid = state->msgid;
		dmsg_state_hold(state);
		conn->state = state;
		state->func = dmsg_lnk_conn;
		state->any.conn = conn;
		TAILQ_INSERT_TAIL(&connq, conn, entry);
		conn->lnk_conn = msg->any.lnk_conn;

		/*
		 * Set up media
		 */
		TAILQ_FOREACH(media, &mediaq, entry) {
			if (uuid_compare(&msg->any.lnk_conn.media_id,
					 &media->media_id, NULL) == 0) {
				break;
			}
		}
		if (media == NULL) {
			media = dmsg_alloc(sizeof(*media));
			media->media_id = msg->any.lnk_conn.media_id;
			TAILQ_INSERT_TAIL(&mediaq, media, entry);
		}
		state->media = media;
		++media->refs;

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
			iocom->usrmsg_callback(msg, 0);
			dmsg_msg_result(msg, 0);
			dmsg_iocom_signal(iocom);
			break;
		}
		/* FALL THROUGH */
	case DMSG_LNK_CONN | DMSGF_DELETE:
	case DMSG_LNK_ERROR | DMSGF_DELETE:
		/*
		 * On transaction terminate we clean out our h2span_conn
		 * and acknowledge the request, closing the transaction.
		 */
		dmio_printf(iocom, 3, "%s\n", "LNK_CONN: Terminated");
		conn = state->any.conn;
		assert(conn);

		/*
		 * Adjust media refs
		 *
		 * Callback will clean out media config / user-opaque state
		 */
		media = state->media;
		--media->refs;
		if (media->refs == 0) {
			dmio_printf(iocom, 3, "%s\n", "Media shutdown");
			TAILQ_REMOVE(&mediaq, media, entry);
			pthread_mutex_unlock(&cluster_mtx);
			iocom->usrmsg_callback(msg, 0);
			pthread_mutex_lock(&cluster_mtx);
			dmsg_free(media);
		}
		state->media = NULL;

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = RB_ROOT(&conn->tree)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out conn
		 */
		conn->state = NULL;
		msg->state->any.conn = NULL;
		msg->state->iocom->conn = NULL;
		TAILQ_REMOVE(&connq, conn, entry);
		dmsg_free(conn);

		dmsg_msg_reply(msg, 0);
		dmsg_state_drop(state);
		/* state invalid after reply */
		break;
	default:
		iocom->usrmsg_callback(msg, 1);
#if 0
		if (msg->any.head.cmd & DMSGF_DELETE)
			goto deleteconn;
		dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
#endif
		break;
	}
	pthread_mutex_unlock(&cluster_mtx);
}
/*
 * LNK_SPAN - Spanning tree protocol message reception
 *	      (incoming iocom lock not held)
 *
 * Receive a spanning tree transactional message, creating or destroying
 * a SPAN and propagating it to other iocoms.
 */
void
dmsg_lnk_span(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	dmsg_iocom_t *iocom = state->iocom;
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *alloc = NULL;

	/*
	 * XXX
	 *
	 * Ignore reply to LNK_SPAN.  The reply is expected and allows
	 * commands to flow in both directions on the open transaction.
	 * This will also ignore DMSGF_REPLY|DMSGF_DELETE messages.
	 * Since we take no action if the other end unexpectedly closes
	 * their side of the transaction, we can ignore that too.
	 */
	if (msg->any.head.cmd & DMSGF_REPLY) {
		dmio_printf(iocom, 2, "%s\n",
			    "Ignore reply to LNK_SPAN");
		return;
	}

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we initialize the tracking infrastructure
	 */
	if (msg->any.head.cmd & DMSGF_CREATE) {
		assert(state->func == NULL);
		state->func = dmsg_lnk_span;

		dmsg_termstr(msg->any.lnk_span.peer_label);
		dmsg_termstr(msg->any.lnk_span.pfs_label);

		/*
		 * Find the cluster
		 */
		dummy_cls.peer_id = msg->any.lnk_span.peer_id;
		dummy_cls.peer_type = msg->any.lnk_span.peer_type;
		bcopy(msg->any.lnk_span.peer_label, dummy_cls.peer_label,
		      sizeof(dummy_cls.peer_label));
		cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
		if (cls == NULL) {
			cls = dmsg_alloc(sizeof(*cls));
			cls->peer_id = msg->any.lnk_span.peer_id;
			cls->peer_type = msg->any.lnk_span.peer_type;
			bcopy(msg->any.lnk_span.peer_label,
			      cls->peer_label, sizeof(cls->peer_label));
			RB_INIT(&cls->tree);
			RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
		}

		/*
		 * Find the node
		 */
		dummy_node.pfs_id = msg->any.lnk_span.pfs_id;
		bcopy(msg->any.lnk_span.pfs_label, dummy_node.pfs_label,
		      sizeof(dummy_node.pfs_label));
		node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
		if (node == NULL) {
			node = dmsg_alloc(sizeof(*node));
			node->pfs_id = msg->any.lnk_span.pfs_id;
			node->pfs_type = msg->any.lnk_span.pfs_type;
			bcopy(msg->any.lnk_span.pfs_label, node->pfs_label,
			      sizeof(node->pfs_label));
			node->cls = cls;
			RB_INIT(&node->tree);
			RB_INSERT(h2span_node_tree, &cls->tree, node);
		}

		/*
		 * Create the link
		 *
		 * NOTE: Sub-transactions on the incoming SPAN can be used
		 *	 to talk to the originator.  We should not set-up
		 *	 state->relay for incoming SPANs since our sub-trans
		 *	 is running on the same interface (i.e. no actual
		 *	 relaying need be done).
		 *
		 * NOTE: Later on when we relay the SPAN out the outgoing
		 *	 SPAN state will be set up to relay back to this
		 *	 state.
		 *
		 * NOTE: It is possible for SPAN targets to send one-way
		 *	 messages to the originator but it is not possible
		 *	 for the originator to (currently) broadcast one-way
		 *	 messages to all of its SPAN targets.  The protocol
		 *	 allows such a feature to be added in the future.
		 */
		assert(state->any.link == NULL);
		dmsg_state_hold(state);
		slink = dmsg_alloc(sizeof(*slink));
		TAILQ_INIT(&slink->relayq);
		slink->node = node;
		slink->state = state;
		state->any.link = slink;
		slink->lnk_span = msg->any.lnk_span;

		RB_INSERT(h2span_link_tree, &node->tree, slink);

		dmio_printf(iocom, 3,
			    "LNK_SPAN(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			    iocom, slink,
			    dmsg_uuid_to_str(&msg->any.lnk_span.peer_id,
					     &alloc),
			    msg->any.lnk_span.peer_label,
			    msg->any.lnk_span.pfs_label,
			    msg->any.lnk_span.dist);
		free(alloc);
#if 0
		dmsg_relay_scan(NULL, node);
#endif
		/*
		 * Ack the open, which will issue a CREATE on our side, and
		 * leave the transaction open.  Necessary to allow the
		 * transaction to be used as a virtual circuit.
		 */
		dmsg_state_result(state, 0);
		dmsg_iocom_signal(iocom);
	}

	/*
	 * On transaction terminate we remove the tracking infrastructure.
	 */
	if (msg->any.head.cmd & DMSGF_DELETE) {
		slink = state->any.link;
		assert(slink != NULL);
		assert(slink->state == state);
		node = slink->node;
		cls = node->cls;

		dmio_printf(iocom, 3,
			    "LNK_DELE(thr %p): %p %s cl=%s fs=%s\n",
			    iocom, slink,
			    dmsg_uuid_to_str(&cls->peer_id, &alloc),
			    cls->peer_label,
			    node->pfs_label);
		free(alloc);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out the topology
		 */
		RB_REMOVE(h2span_link_tree, &node->tree, slink);
		if (RB_EMPTY(&node->tree)) {
			RB_REMOVE(h2span_node_tree, &cls->tree, node);
			if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
				RB_REMOVE(h2span_cluster_tree,
					  &cluster_tree, cls);
				dmsg_free(cls);
			}
			node->cls = NULL;
			dmsg_free(node);
			node = NULL;
		}
		state->any.link = NULL;
		slink->state = NULL;
		slink->node = NULL;
		dmsg_state_drop(state);
		dmsg_free(slink);

		/*
		 * We have to terminate the transaction
		 */
		dmsg_state_reply(state, 0);
		/* state invalid after reply */

		/*
		 * If the node still exists issue any required updates.  If
		 * it doesn't then all related relays have already been
		 * removed and there's nothing left to do.
		 */
#if 0
		if (node)
			dmsg_relay_scan(NULL, node);
#endif
		if (node)
			dmsg_iocom_signal(iocom);
	}

	pthread_mutex_unlock(&cluster_mtx);
}
/*
 * Respond to a PING with a PING|REPLY, forward replies to the usermsg
 * callback.
 */
static
void
dmsg_lnk_ping(dmsg_msg_t *msg)
{
	dmsg_msg_t *rep;

	if (msg->any.head.cmd & DMSGF_REPLY) {
		msg->state->iocom->usrmsg_callback(msg, 1);
	} else {
		rep = dmsg_msg_alloc(msg->state, 0,
				     DMSG_LNK_PING | DMSGF_REPLY,
				     NULL, NULL);
		dmsg_msg_write(rep);
	}
}
/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void dmsg_relay_scan_specific(h2span_node_t *node,
				h2span_conn_t *conn);

static void
dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node)
{
	h2span_cluster_t *cls;

	if (node) {
		/*
		 * Iterate specific node
		 */
		TAILQ_FOREACH(conn, &connq, entry)
			dmsg_relay_scan_specific(node, conn);
	} else {
		/*
		 * Full iteration.
		 *
		 * Iterate cluster ids, nodes, and either a specific connection
		 * or all connections.
		 */
		RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
			/*
			 * Iterate node ids
			 */
			RB_FOREACH(node, h2span_node_tree, &cls->tree) {
				/*
				 * Synchronize the node's link (received SPANs)
				 * with each connection's relays.
				 */
				if (conn) {
					dmsg_relay_scan_specific(node, conn);
				} else {
					TAILQ_FOREACH(conn, &connq, entry) {
						dmsg_relay_scan_specific(node,
									 conn);
					}
					assert(conn == NULL);
				}
			}
		}
	}
}
/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We only propagate the top link
 * for now (XXX we want to propagate the top two).
 *
 * The dmsg_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
	h2span_node_t *node;
	h2span_relay_t *relay;
};

static int
dmsg_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	if ((intptr_t)relay->source_rt->any.link->node < (intptr_t)info->node)
		return(-1);
	if ((intptr_t)relay->source_rt->any.link->node > (intptr_t)info->node)
		return(1);
	return(0);
}

static int
dmsg_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	info->relay = relay;
	return(-1);
}
static void
dmsg_relay_scan_specific(h2span_node_t *node, h2span_conn_t *conn)
{
	struct relay_scan_info info;
	h2span_relay_t *relay;
	h2span_relay_t *next_relay;
	h2span_link_t *slink;
	dmsg_lnk_conn_t *lconn;
	dmsg_lnk_span_t *lspan;
	int count;
	int maxcount = 2;
#ifdef REQUIRE_SYMMETRICAL
	uint32_t lastdist = DMSG_SPAN_MAXDIST;
	uint32_t lastrnss = 0;
#endif

	info.node = node;
	info.relay = NULL;

	/*
	 * Locate the first related relay for the node on this connection.
	 * relay will be NULL if there were none.
	 */
	RB_SCAN(h2span_relay_tree, &conn->tree,
		dmsg_relay_scan_cmp, dmsg_relay_scan_callback, &info);
	relay = info.relay;
	info.relay = NULL;
	if (relay)
		assert(relay->source_rt->any.link->node == node);

	dm_printf(9, "relay scan for connection %p\n", conn);

	/*
	 * Iterate the node's links (received SPANs) in distance order,
	 * lowest (best) dist first.
	 *
	 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
	 *
	 * Track relays while iterating the best links and construct
	 * missing relays when necessary.
	 *
	 * (If some prior better link was removed it would have also
	 * removed the relay, so the relay can only match exactly or
	 * be worse).
	 */
	count = 0;
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		/*
		 * Increment count of successful relays.  This isn't
		 * quite accurate if we break out but nothing after
		 * the loop uses (count).
		 *
		 * If count exceeds the maximum number of relays we desire
		 * we normally want to break out.  However, in order to
		 * guarantee a symmetric path we have to continue if both
		 * (dist) and (rnss) continue to match.  Otherwise the SPAN
		 * propagation in the reverse direction may choose different
		 * routes and we will not have a symmetric path.
		 *
		 * NOTE: Spanning tree does not have to be symmetrical so
		 *	 this code is not currently enabled.
		 */
		if (++count >= maxcount) {
#ifdef REQUIRE_SYMMETRICAL
			if (lastdist != slink->lnk_span.dist ||
			    lastrnss != slink->lnk_span.rnss) {
				break;
			}
#else
			break;
#endif
			/* go beyond the nominal maximum desired relays */
		}

		/*
		 * Match, relay already in-place, get the next
		 * relay to match against the next slink.
		 */
		if (relay && relay->source_rt->any.link == slink) {
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
			continue;
		}

		/*
		 * We might want this SLINK, if it passes our filters.
		 *
		 * The spanning tree can cause closed loops so we have
		 * to limit slink->dist.
		 */
		if (slink->lnk_span.dist > DMSG_SPAN_MAXDIST)
			break;

		/*
		 * Don't bother transmitting a LNK_SPAN out the same
		 * connection it came in on.  Trivial optimization.
		 */
		if (slink->state->iocom == conn->state->iocom)
			break;

		/*
		 * NOTE ON FILTERS: The protocol spec allows non-requested
		 * SPANs to be transmitted, the other end is expected to
		 * leave their transactions open but otherwise ignore them.
		 *
		 * Don't bother transmitting if the remote connection
		 * is not accepting this SPAN's peer_type.
		 */
		lspan = &slink->lnk_span;
		lconn = &conn->lnk_conn;
		if (((1LLU << lspan->peer_type) & lconn->peer_mask) == 0)
			break;

		/*
		 * Do not give pure clients visibility to other pure clients
		 */
		if (lconn->peer_type == DMSG_PEER_CLIENT &&
		    lspan->peer_type == DMSG_PEER_CLIENT) {
			break;
		}

		/*
		 * Clients can set peer_id to filter the peer_id of incoming
		 * spans.  Other peer types set peer_id to advertise their
		 * own peer_id.  XXX
		 *
		 * NOTE: peer_label is not a filter on clients, it identifies
		 *	 the client just as it identifies other peer types.
		 */
		if (lconn->peer_type == DMSG_PEER_CLIENT &&
		    !uuid_is_nil(&lconn->peer_id, NULL) &&
		    uuid_compare(&slink->node->cls->peer_id,
				 &lconn->peer_id, NULL)) {
			break;
		}

		/*
		 * NOTE! pfs_id differentiates nodes within the same cluster
		 *	 so we obviously don't want to match those.  Similarly
		 *	 for pfs_label.
		 */

		/*
		 * Ok, we've accepted this SPAN for relaying.
		 */
		assert(relay == NULL ||
		       relay->source_rt->any.link->node != slink->node ||
		       relay->source_rt->any.link->lnk_span.dist >=
			slink->lnk_span.dist);
		relay = dmsg_generate_relay(conn, slink);
#ifdef REQUIRE_SYMMETRICAL
		lastdist = slink->lnk_span.dist;
		lastrnss = slink->lnk_span.rnss;
#endif

		/*
		 * Match (created new relay), get the next relay to
		 * match against the next slink.
		 */
		relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
	}

	/*
	 * Any remaining relays belonging to this connection which match
	 * the node are in excess of the current aggregate spanning state
	 * and should be removed.
	 */
	while (relay && relay->source_rt->any.link->node == node) {
		next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		dm_printf(9, "%s\n", "RELAY DELETE FROM EXTRAS");
		dmsg_relay_delete(relay);
		relay = next_relay;
	}
}
/*
 * Find the slink associated with the msgid (passed as a hex string
 * label) and return its state, so the caller can issue a transaction.
 */
dmsg_state_t *
dmsg_findspan(const char *label)
{
	dmsg_state_t *state;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	uint64_t msgid = strtoull(label, NULL, 16);

	pthread_mutex_lock(&cluster_mtx);

	state = NULL;
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				if (slink->state->msgid == msgid) {
					state = slink->state;
					goto done;
				}
			}
		}
	}
done:
	pthread_mutex_unlock(&cluster_mtx);

	dm_printf(8, "findspan: %p\n", state);

	return state;
}
/*
 * Helper function to generate missing relay on target connection.
 *
 * cluster_mtx must be held
 */
static
h2span_relay_t *
dmsg_generate_relay(h2span_conn_t *conn, h2span_link_t *slink)
{
	h2span_relay_t *relay;
	dmsg_msg_t *msg;

	dmsg_state_hold(slink->state);

	relay = dmsg_alloc(sizeof(*relay));
	relay->conn = conn;
	relay->source_rt = slink->state;
	/* relay->source_rt->any.link = slink; */

	/*
	 * NOTE: relay->target_rt->any.relay set to relay by alloc.
	 *
	 * NOTE: LNK_SPAN is transmitted as a top-level transaction.
	 */
	msg = dmsg_msg_alloc(&conn->state->iocom->state0,
			     0, DMSG_LNK_SPAN | DMSGF_CREATE,
			     dmsg_lnk_relay, relay);
	dmsg_state_hold(msg->state);
	relay->target_rt = msg->state;
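	/*
	 * The relayed copy advertises one additional hop of distance
	 * and accumulates this host's random sub-sort value, so spans
	 * with equal dist still sort identically on every node (see
	 * dmsg_rnss() at the bottom of this file).
	 */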
	msg->any.lnk_span = slink->lnk_span;
	msg->any.lnk_span.dist = slink->lnk_span.dist + 1;
	msg->any.lnk_span.rnss = slink->lnk_span.rnss + dmsg_rnss();

	RB_INSERT(h2span_relay_tree, &conn->tree, relay);
	TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

	/*
	 * Seed the relay so new sub-transactions received on the outgoing
	 * SPAN circuit are relayed back to the originator.
	 */
	msg->state->relay = relay->source_rt;
	dmsg_state_hold(msg->state->relay);

	dmsg_msg_write(msg);

	return (relay);
}
/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
dmsg_lnk_relay(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_relay_t *relay;

	assert(msg->any.head.cmd & DMSGF_REPLY);

	if (msg->any.head.cmd & DMSGF_DELETE) {
		pthread_mutex_lock(&cluster_mtx);
		dm_printf(8, "%s\n", "RELAY DELETE FROM LNK_RELAY MSG");
		if ((relay = state->any.relay) != NULL) {
			dmsg_relay_delete(relay);
		} else {
			dmsg_state_reply(state, 0);
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
}
/*
 * cluster_mtx held by caller
 */
static
void
dmsg_relay_delete(h2span_relay_t *relay)
{
	dm_printf(8,
		  "RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p "
		  "DIST=%d FD %d STATE %p\n",
		  relay->source_rt->any.link,
		  relay,
		  relay->source_rt->any.link->node->cls,
		  relay->source_rt->any.link->node,
		  relay->source_rt->any.link->lnk_span.dist,
		  relay->conn->state->iocom->sock_fd,
		  relay->target_rt);

	RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
	TAILQ_REMOVE(&relay->source_rt->any.link->relayq, relay, entry);

	if (relay->target_rt) {
		relay->target_rt->any.relay = NULL;
		dmsg_state_reply(relay->target_rt, 0);
		dmsg_state_drop(relay->target_rt);
		/* state invalid after reply */
		relay->target_rt = NULL;
	}

	/*
	 * NOTE: relay->source_rt->refs is held by the relay SPAN
	 *	 state, not by this relay structure.
	 */
	relay->conn = NULL;
	if (relay->source_rt) {
		dmsg_state_drop(relay->source_rt);
		relay->source_rt = NULL;
	}
	dmsg_free(relay);
}
/************************************************************************
 *			ROUTER AND MESSAGING HANDLES			*
 ************************************************************************
 *
 * Basically the idea here is to provide a stable data structure which
 * can be localized to the caller for higher level protocols to work with.
 * Depending on the context, these dmsg_handle's can be pooled by use-case
 * and remain persistent through a client (or mount point's) life.
 */

#if 0
/*
 * Obtain a stable handle on a cluster given its uuid.  This ties directly
 * into the global cluster topology, creating the structure if necessary
 * (even if the uuid does not exist or does not exist yet), and preventing
 * the structure from getting ripped out from under us while we hold a
 * pointer to it.
 */
h2span_cluster_t *
dmsg_cluster_get(uuid_t *peer_id)
{
	h2span_cluster_t dummy_cls;
	h2span_cluster_t *cls;

	dummy_cls.peer_id = *peer_id;
	pthread_mutex_lock(&cluster_mtx);
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls)
		++cls->refs;
	pthread_mutex_unlock(&cluster_mtx);
	return (cls);
}

void
dmsg_cluster_put(h2span_cluster_t *cls)
{
	pthread_mutex_lock(&cluster_mtx);
	assert(cls->refs > 0);
	--cls->refs;
	if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
		RB_REMOVE(h2span_cluster_tree,
			  &cluster_tree, cls);
		dmsg_free(cls);
	}
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Obtain a stable handle to a specific cluster node given its uuid.
 * This handle does NOT lock in the route to the node and is typically
 * used as part of the dmsg_handle_*() API to obtain a set of
 * stable nodes.
 */
h2span_node_t *
dmsg_node_get(h2span_cluster_t *cls, uuid_t *pfs_id)
{
}

#endif
/*
 * Dumps the spanning tree
 *
 * DEBUG ONLY
 */
void
dmsg_shell_tree(dmsg_iocom_t *iocom, char *cmdbuf __unused)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *uustr = NULL;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		dmsg_printf(iocom, "Cluster %s %s (%s)\n",
			    dmsg_peer_type_to_str(cls->peer_type),
			    dmsg_uuid_to_str(&cls->peer_id, &uustr),
			    cls->peer_label);
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			dmsg_printf(iocom, "    Node %02x %s (%s)\n",
				    node->pfs_type,
				    dmsg_uuid_to_str(&node->pfs_id, &uustr),
				    node->pfs_label);
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				dmsg_printf(iocom,
					    "\tSLink msgid %016jx "
					    "dist=%d via %d\n",
					    (intmax_t)slink->state->msgid,
					    slink->lnk_span.dist,
					    slink->state->iocom->sock_fd);
				TAILQ_FOREACH(relay, &slink->relayq, entry) {
					dmsg_printf(iocom,
					    "\t    Relay-out msgid %016jx "
					    "via %d\n",
					    (intmax_t)relay->target_rt->msgid,
					    relay->target_rt->iocom->sock_fd);
				}
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	if (uustr)
		free(uustr);
#if 0
	TAILQ_FOREACH(conn, &connq, entry) {
	}
#endif
}
/*
 * DEBUG ONLY
 *
 * Locate the state representing an incoming LNK_SPAN given its msgid.
 */
int
dmsg_debug_findspan(uint64_t msgid, dmsg_state_t **statep)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				if (slink->state->msgid == msgid) {
					*statep = slink->state;
					goto found;
				}
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	*statep = NULL;
	return(ENOENT);
found:
	pthread_mutex_unlock(&cluster_mtx);
	return(0);
}
/*
 * Random number sub-sort value to add to SPAN rnss fields on relay.
 * This allows us to differentiate spans with the same <dist> field
 * for relaying purposes.  We must normally limit the number of relays
 * for any given SPAN origination but we must also guarantee that a
 * symmetric reverse path exists, so we use the rnss field as a sub-sort
 * (since there can be thousands or millions if we only match on <dist>),
 * and if there are STILL too many spans we go past the limit.
 */
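/*
 * Example with hypothetical values: two 2-hop paths to the same node
 * both arrive with dist=2.  If the intermediate hosts contributed rnss
 * values 0x1111 and 0x2222, every node sorts (2,0x1111) ahead of
 * (2,0x2222) and therefore agrees on the same "top N" selection,
 * keeping the reverse path symmetric.
 */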
static
uint32_t
dmsg_rnss(void)
{
	if (DMsgRNSS == 0) {
		pthread_mutex_lock(&cluster_mtx);
		while (DMsgRNSS == 0) {
			srandomdev();
			DMsgRNSS = random();
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
	return(DMsgRNSS);
}