2 ctdb main protocol code
4 Copyright (C) Andrew Tridgell 2006
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, see <http://www.gnu.org/licenses/>.
21 #include "system/network.h"
22 #include "system/filesys.h"
27 #include "lib/util/dlinklist.h"
28 #include "lib/util/debug.h"
29 #include "lib/util/samba_util.h"
31 #include "ctdb_private.h"
32 #include "ctdb_client.h"
34 #include "common/common.h"
35 #include "common/logging.h"
38 choose the transport we will use
40 int ctdb_set_transport(struct ctdb_context
*ctdb
, const char *transport
)
42 ctdb
->transport
= talloc_strdup(ctdb
, transport
);
43 CTDB_NO_MEMORY(ctdb
, ctdb
->transport
);
48 /* Return the PNN for nodeip, CTDB_UNKNOWN_PNN if nodeip is invalid */
49 uint32_t ctdb_ip_to_pnn(struct ctdb_context
*ctdb
,
50 const ctdb_sock_addr
*nodeip
)
54 for (nodeid
=0;nodeid
<ctdb
->num_nodes
;nodeid
++) {
55 if (ctdb
->nodes
[nodeid
]->flags
& NODE_FLAGS_DELETED
) {
58 if (ctdb_same_ip(&ctdb
->nodes
[nodeid
]->address
, nodeip
)) {
59 return ctdb
->nodes
[nodeid
]->pnn
;
63 return CTDB_UNKNOWN_PNN
;
66 /* Load a nodes list file into a nodes array */
67 static int convert_node_map_to_list(struct ctdb_context
*ctdb
,
69 struct ctdb_node_map_old
*node_map
,
70 struct ctdb_node
***nodes
,
75 *nodes
= talloc_zero_array(mem_ctx
,
76 struct ctdb_node
*, node_map
->num
);
77 CTDB_NO_MEMORY(ctdb
, *nodes
);
78 *num_nodes
= node_map
->num
;
80 for (i
= 0; i
< node_map
->num
; i
++) {
81 struct ctdb_node
*node
;
83 node
= talloc_zero(*nodes
, struct ctdb_node
);
84 CTDB_NO_MEMORY(ctdb
, node
);
87 node
->address
= node_map
->nodes
[i
].addr
;
88 node
->name
= talloc_asprintf(node
, "%s:%u",
89 ctdb_addr_to_str(&node
->address
),
90 ctdb_addr_to_port(&node
->address
));
92 node
->flags
= node_map
->nodes
[i
].flags
;
93 if (!(node
->flags
& NODE_FLAGS_DELETED
)) {
94 node
->flags
= NODE_FLAGS_UNHEALTHY
;
96 node
->flags
|= NODE_FLAGS_DISCONNECTED
;
100 node
->dead_count
= 0;
106 /* Load the nodes list from a file */
107 void ctdb_load_nodes_file(struct ctdb_context
*ctdb
)
109 struct ctdb_node_map_old
*node_map
;
112 node_map
= ctdb_read_nodes_file(ctdb
, ctdb
->nodes_file
);
113 if (node_map
== NULL
) {
117 TALLOC_FREE(ctdb
->nodes
);
118 ret
= convert_node_map_to_list(ctdb
, ctdb
, node_map
,
119 &ctdb
->nodes
, &ctdb
->num_nodes
);
124 talloc_free(node_map
);
128 DEBUG(DEBUG_ERR
, ("Failed to load nodes file \"%s\"\n",
130 talloc_free(node_map
);
135 setup the local node address
137 int ctdb_set_address(struct ctdb_context
*ctdb
, const char *address
)
139 ctdb
->address
= talloc(ctdb
, ctdb_sock_addr
);
140 CTDB_NO_MEMORY(ctdb
, ctdb
->address
);
142 if (ctdb_parse_address(ctdb
, address
, ctdb
->address
) != 0) {
146 ctdb
->name
= talloc_asprintf(ctdb
, "%s:%u",
147 ctdb_addr_to_str(ctdb
->address
),
148 ctdb_addr_to_port(ctdb
->address
));
154 return the number of active nodes
156 uint32_t ctdb_get_num_active_nodes(struct ctdb_context
*ctdb
)
160 for (i
=0; i
< ctdb
->num_nodes
; i
++) {
161 if (!(ctdb
->nodes
[i
]->flags
& NODE_FLAGS_INACTIVE
)) {
170 called when we need to process a packet. This can be a requeued packet
171 after a lockwait, or a real packet from another node
173 void ctdb_input_pkt(struct ctdb_context
*ctdb
, struct ctdb_req_header
*hdr
)
177 /* place the packet as a child of the tmp_ctx. We then use
178 talloc_free() below to free it. If any of the calls want
179 to keep it, then they will steal it somewhere else, and the
180 talloc_free() will only free the tmp_ctx */
181 tmp_ctx
= talloc_new(ctdb
);
182 talloc_steal(tmp_ctx
, hdr
);
184 DEBUG(DEBUG_DEBUG
,(__location__
" ctdb request %u of type %u length %u from "
185 "node %u to %u\n", hdr
->reqid
, hdr
->operation
, hdr
->length
,
186 hdr
->srcnode
, hdr
->destnode
));
188 switch (hdr
->operation
) {
190 case CTDB_REPLY_CALL
:
191 case CTDB_REQ_DMASTER
:
192 case CTDB_REPLY_DMASTER
:
193 /* we don't allow these calls when banned */
194 if (ctdb
->nodes
[ctdb
->pnn
]->flags
& NODE_FLAGS_BANNED
) {
195 DEBUG(DEBUG_DEBUG
,(__location__
" ctdb operation %u"
197 " length %u from node %u to %u while node"
199 hdr
->operation
, hdr
->reqid
,
201 hdr
->srcnode
, hdr
->destnode
));
205 /* for ctdb_call inter-node operations verify that the
206 remote node that sent us the call is running in the
207 same generation instance as this node
209 if (ctdb
->vnn_map
->generation
!= hdr
->generation
) {
210 DEBUG(DEBUG_DEBUG
,(__location__
" ctdb operation %u"
212 " length %u from node %u to %u had an"
213 " invalid generation id:%u while our"
214 " generation id is:%u\n",
215 hdr
->operation
, hdr
->reqid
,
217 hdr
->srcnode
, hdr
->destnode
,
218 hdr
->generation
, ctdb
->vnn_map
->generation
));
223 switch (hdr
->operation
) {
225 CTDB_INCREMENT_STAT(ctdb
, node
.req_call
);
226 ctdb_request_call(ctdb
, hdr
);
229 case CTDB_REPLY_CALL
:
230 CTDB_INCREMENT_STAT(ctdb
, node
.reply_call
);
231 ctdb_reply_call(ctdb
, hdr
);
234 case CTDB_REPLY_ERROR
:
235 CTDB_INCREMENT_STAT(ctdb
, node
.reply_error
);
236 ctdb_reply_error(ctdb
, hdr
);
239 case CTDB_REQ_DMASTER
:
240 CTDB_INCREMENT_STAT(ctdb
, node
.req_dmaster
);
241 ctdb_request_dmaster(ctdb
, hdr
);
244 case CTDB_REPLY_DMASTER
:
245 CTDB_INCREMENT_STAT(ctdb
, node
.reply_dmaster
);
246 ctdb_reply_dmaster(ctdb
, hdr
);
249 case CTDB_REQ_MESSAGE
:
250 CTDB_INCREMENT_STAT(ctdb
, node
.req_message
);
251 ctdb_request_message(ctdb
, hdr
);
254 case CTDB_REQ_CONTROL
:
255 CTDB_INCREMENT_STAT(ctdb
, node
.req_control
);
256 ctdb_request_control(ctdb
, hdr
);
259 case CTDB_REPLY_CONTROL
:
260 CTDB_INCREMENT_STAT(ctdb
, node
.reply_control
);
261 ctdb_reply_control(ctdb
, hdr
);
264 case CTDB_REQ_KEEPALIVE
:
265 CTDB_INCREMENT_STAT(ctdb
, keepalive_packets_recv
);
266 ctdb_request_keepalive(ctdb
, hdr
);
269 case CTDB_REQ_TUNNEL
:
270 CTDB_INCREMENT_STAT(ctdb
, node
.req_tunnel
);
271 ctdb_request_tunnel(ctdb
, hdr
);
275 DEBUG(DEBUG_CRIT
,("%s: Packet with unknown operation %u\n",
276 __location__
, hdr
->operation
));
281 talloc_free(tmp_ctx
);
286 called by the transport layer when a node is dead
288 void ctdb_node_dead(struct ctdb_node
*node
)
290 if (node
->flags
& NODE_FLAGS_DISCONNECTED
) {
291 DEBUG(DEBUG_INFO
,("%s: node %s is already marked disconnected: %u connected\n",
292 node
->ctdb
->name
, node
->name
,
293 node
->ctdb
->num_connected
));
296 node
->ctdb
->num_connected
--;
297 node
->flags
|= NODE_FLAGS_DISCONNECTED
| NODE_FLAGS_UNHEALTHY
;
299 node
->dead_count
= 0;
301 DEBUG(DEBUG_ERR
,("%s: node %s is dead: %u connected\n",
302 node
->ctdb
->name
, node
->name
, node
->ctdb
->num_connected
));
303 ctdb_daemon_cancel_controls(node
->ctdb
, node
);
305 if (node
->ctdb
->methods
== NULL
) {
306 DEBUG(DEBUG_ERR
,(__location__
" Can not restart transport while shutting down daemon.\n"));
310 node
->ctdb
->methods
->restart(node
);
314 called by the transport layer when a node is connected
316 void ctdb_node_connected(struct ctdb_node
*node
)
318 if (!(node
->flags
& NODE_FLAGS_DISCONNECTED
)) {
319 DEBUG(DEBUG_INFO
,("%s: node %s is already marked connected: %u connected\n",
320 node
->ctdb
->name
, node
->name
,
321 node
->ctdb
->num_connected
));
324 node
->ctdb
->num_connected
++;
325 node
->dead_count
= 0;
326 node
->flags
&= ~NODE_FLAGS_DISCONNECTED
;
327 node
->flags
|= NODE_FLAGS_UNHEALTHY
;
329 ("%s: connected to %s - %u connected\n",
330 node
->ctdb
->name
, node
->name
, node
->ctdb
->num_connected
));
/* state carried by a packet deferred to the next event loop iteration */
struct queue_next {
	struct ctdb_context *ctdb;
	struct ctdb_req_header *hdr;
};
340 triggered when a deferred packet is due
342 static void queue_next_trigger(struct tevent_context
*ev
,
343 struct tevent_timer
*te
,
344 struct timeval t
, void *private_data
)
346 struct queue_next
*q
= talloc_get_type(private_data
, struct queue_next
);
347 ctdb_input_pkt(q
->ctdb
, q
->hdr
);
352 defer a packet, so it is processed on the next event loop
353 this is used for sending packets to ourselves
355 static void ctdb_defer_packet(struct ctdb_context
*ctdb
, struct ctdb_req_header
*hdr
)
357 struct queue_next
*q
;
358 q
= talloc(ctdb
, struct queue_next
);
360 DEBUG(DEBUG_ERR
,(__location__
" Failed to allocate deferred packet\n"));
364 q
->hdr
= talloc_memdup(ctdb
, hdr
, hdr
->length
);
365 if (q
->hdr
== NULL
) {
366 DEBUG(DEBUG_ERR
,("Error copying deferred packet to self\n"));
370 /* use this to put packets directly into our recv function */
371 ctdb_input_pkt(q
->ctdb
, q
->hdr
);
373 tevent_add_timer(ctdb
->ev
, q
, timeval_zero(), queue_next_trigger
, q
);
379 broadcast a packet to all nodes
381 static void ctdb_broadcast_packet_all(struct ctdb_context
*ctdb
,
382 struct ctdb_req_header
*hdr
)
385 for (i
=0; i
< ctdb
->num_nodes
; i
++) {
386 if (ctdb
->nodes
[i
]->flags
& NODE_FLAGS_DELETED
) {
389 hdr
->destnode
= ctdb
->nodes
[i
]->pnn
;
390 ctdb_queue_packet(ctdb
, hdr
);
395 broadcast a packet to all active nodes
397 static void ctdb_broadcast_packet_active(struct ctdb_context
*ctdb
,
398 struct ctdb_req_header
*hdr
)
401 for (i
= 0; i
< ctdb
->num_nodes
; i
++) {
402 if (ctdb
->nodes
[i
]->flags
& NODE_FLAGS_INACTIVE
) {
406 hdr
->destnode
= ctdb
->nodes
[i
]->pnn
;
407 ctdb_queue_packet(ctdb
, hdr
);
412 broadcast a packet to all connected nodes
414 static void ctdb_broadcast_packet_connected(struct ctdb_context
*ctdb
,
415 struct ctdb_req_header
*hdr
)
418 for (i
=0; i
< ctdb
->num_nodes
; i
++) {
419 if (ctdb
->nodes
[i
]->flags
& NODE_FLAGS_DELETED
) {
422 if (!(ctdb
->nodes
[i
]->flags
& NODE_FLAGS_DISCONNECTED
)) {
423 hdr
->destnode
= ctdb
->nodes
[i
]->pnn
;
424 ctdb_queue_packet(ctdb
, hdr
);
430 queue a packet or die
432 void ctdb_queue_packet(struct ctdb_context
*ctdb
, struct ctdb_req_header
*hdr
)
434 struct ctdb_node
*node
;
436 switch (hdr
->destnode
) {
437 case CTDB_BROADCAST_ALL
:
438 ctdb_broadcast_packet_all(ctdb
, hdr
);
440 case CTDB_BROADCAST_ACTIVE
:
441 ctdb_broadcast_packet_active(ctdb
, hdr
);
443 case CTDB_BROADCAST_CONNECTED
:
444 ctdb_broadcast_packet_connected(ctdb
, hdr
);
448 CTDB_INCREMENT_STAT(ctdb
, node_packets_sent
);
450 if (!ctdb_validate_pnn(ctdb
, hdr
->destnode
)) {
451 DEBUG(DEBUG_CRIT
,(__location__
" cant send to node %u that does not exist\n",
456 node
= ctdb
->nodes
[hdr
->destnode
];
458 if (node
->flags
& NODE_FLAGS_DELETED
) {
459 DEBUG(DEBUG_ERR
, (__location__
" Can not queue packet to DELETED node %d\n", hdr
->destnode
));
463 if (node
->pnn
== ctdb
->pnn
) {
464 ctdb_defer_packet(ctdb
, hdr
);
468 if (ctdb
->methods
== NULL
) {
469 DEBUG(DEBUG_ALERT
, (__location__
" Can not queue packet. "
470 "Transport is DOWN\n"));
475 if (ctdb
->methods
->queue_pkt(node
, (uint8_t *)hdr
, hdr
->length
) != 0) {
476 ctdb_fatal(ctdb
, "Unable to queue packet\n");
484 a valgrind hack to allow us to get opcode specific backtraces
485 very ugly, and relies on no compiler optimisation!
487 void ctdb_queue_packet_opcode(struct ctdb_context
*ctdb
, struct ctdb_req_header
*hdr
, unsigned opcode
)
490 #define DO_OP(x) case x: ctdb_queue_packet(ctdb, hdr); break
592 ctdb_queue_packet(ctdb
, hdr
);