/*
   ctdb main protocol code

   Copyright (C) Andrew Tridgell  2006

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
21 #include "system/network.h"
22 #include "system/filesys.h"
27 #include "lib/util/dlinklist.h"
28 #include "lib/util/debug.h"
29 #include "lib/util/samba_util.h"
31 #include "ctdb_private.h"
32 #include "ctdb_client.h"
34 #include "common/common.h"
35 #include "common/logging.h"
38 choose the transport we will use
40 int ctdb_set_transport(struct ctdb_context
*ctdb
, const char *transport
)
42 ctdb
->transport
= talloc_strdup(ctdb
, transport
);
43 CTDB_NO_MEMORY(ctdb
, ctdb
->transport
);
49 Check whether an ip is a valid node ip
50 Returns the node id for this ip address or -1
52 int ctdb_ip_to_nodeid(struct ctdb_context
*ctdb
, const ctdb_sock_addr
*nodeip
)
56 for (nodeid
=0;nodeid
<ctdb
->num_nodes
;nodeid
++) {
57 if (ctdb
->nodes
[nodeid
]->flags
& NODE_FLAGS_DELETED
) {
60 if (ctdb_same_ip(&ctdb
->nodes
[nodeid
]->address
, nodeip
)) {
69 choose the recovery lock file
71 int ctdb_set_recovery_lock_file(struct ctdb_context
*ctdb
, const char *file
)
73 if (ctdb
->recovery_lock_file
!= NULL
) {
74 talloc_free(ctdb
->recovery_lock_file
);
75 ctdb
->recovery_lock_file
= NULL
;
79 DEBUG(DEBUG_ALERT
,("Recovery lock file set to \"\". Disabling recovery lock checking\n"));
83 ctdb
->recovery_lock_file
= talloc_strdup(ctdb
, file
);
84 CTDB_NO_MEMORY(ctdb
, ctdb
->recovery_lock_file
);
89 /* Load a nodes list file into a nodes array */
90 static int convert_node_map_to_list(struct ctdb_context
*ctdb
,
92 struct ctdb_node_map_old
*node_map
,
93 struct ctdb_node
***nodes
,
98 *nodes
= talloc_zero_array(mem_ctx
,
99 struct ctdb_node
*, node_map
->num
);
100 CTDB_NO_MEMORY(ctdb
, *nodes
);
101 *num_nodes
= node_map
->num
;
103 for (i
= 0; i
< node_map
->num
; i
++) {
104 struct ctdb_node
*node
;
106 node
= talloc_zero(*nodes
, struct ctdb_node
);
107 CTDB_NO_MEMORY(ctdb
, node
);
110 node
->address
= node_map
->nodes
[i
].addr
;
111 node
->name
= talloc_asprintf(node
, "%s:%u",
112 ctdb_addr_to_str(&node
->address
),
113 ctdb_addr_to_port(&node
->address
));
115 node
->flags
= node_map
->nodes
[i
].flags
;
116 if (!(node
->flags
& NODE_FLAGS_DELETED
)) {
117 node
->flags
= NODE_FLAGS_UNHEALTHY
;
119 node
->flags
|= NODE_FLAGS_DISCONNECTED
;
123 node
->dead_count
= 0;
129 /* Load the nodes list from a file */
130 void ctdb_load_nodes_file(struct ctdb_context
*ctdb
)
132 struct ctdb_node_map_old
*node_map
;
135 node_map
= ctdb_read_nodes_file(ctdb
, ctdb
->nodes_file
);
136 if (node_map
== NULL
) {
140 TALLOC_FREE(ctdb
->nodes
);
141 ret
= convert_node_map_to_list(ctdb
, ctdb
, node_map
,
142 &ctdb
->nodes
, &ctdb
->num_nodes
);
147 talloc_free(node_map
);
151 DEBUG(DEBUG_ERR
, ("Failed to load nodes file \"%s\"\n",
153 talloc_free(node_map
);
158 setup the local node address
160 int ctdb_set_address(struct ctdb_context
*ctdb
, const char *address
)
162 ctdb
->address
= talloc(ctdb
, ctdb_sock_addr
);
163 CTDB_NO_MEMORY(ctdb
, ctdb
->address
);
165 if (ctdb_parse_address(ctdb
, address
, ctdb
->address
) != 0) {
169 ctdb
->name
= talloc_asprintf(ctdb
, "%s:%u",
170 ctdb_addr_to_str(ctdb
->address
),
171 ctdb_addr_to_port(ctdb
->address
));
177 return the number of active nodes
179 uint32_t ctdb_get_num_active_nodes(struct ctdb_context
*ctdb
)
183 for (i
=0; i
< ctdb
->num_nodes
; i
++) {
184 if (!(ctdb
->nodes
[i
]->flags
& NODE_FLAGS_INACTIVE
)) {
193 called when we need to process a packet. This can be a requeued packet
194 after a lockwait, or a real packet from another node
196 void ctdb_input_pkt(struct ctdb_context
*ctdb
, struct ctdb_req_header
*hdr
)
200 /* place the packet as a child of the tmp_ctx. We then use
201 talloc_free() below to free it. If any of the calls want
202 to keep it, then they will steal it somewhere else, and the
203 talloc_free() will only free the tmp_ctx */
204 tmp_ctx
= talloc_new(ctdb
);
205 talloc_steal(tmp_ctx
, hdr
);
207 DEBUG(DEBUG_DEBUG
,(__location__
" ctdb request %u of type %u length %u from "
208 "node %u to %u\n", hdr
->reqid
, hdr
->operation
, hdr
->length
,
209 hdr
->srcnode
, hdr
->destnode
));
211 switch (hdr
->operation
) {
213 case CTDB_REPLY_CALL
:
214 case CTDB_REQ_DMASTER
:
215 case CTDB_REPLY_DMASTER
:
216 /* we don't allow these calls when banned */
217 if (ctdb
->nodes
[ctdb
->pnn
]->flags
& NODE_FLAGS_BANNED
) {
218 DEBUG(DEBUG_DEBUG
,(__location__
" ctdb operation %u"
220 " length %u from node %u to %u while node"
222 hdr
->operation
, hdr
->reqid
,
224 hdr
->srcnode
, hdr
->destnode
));
228 /* Push the check for generation in the handlers for these
229 * operations. Check database generation instead of global
230 * generation. Since the database context is not available
231 * here, push the check in the operations.
235 switch (hdr
->operation
) {
237 CTDB_INCREMENT_STAT(ctdb
, node
.req_call
);
238 ctdb_request_call(ctdb
, hdr
);
241 case CTDB_REPLY_CALL
:
242 CTDB_INCREMENT_STAT(ctdb
, node
.reply_call
);
243 ctdb_reply_call(ctdb
, hdr
);
246 case CTDB_REPLY_ERROR
:
247 CTDB_INCREMENT_STAT(ctdb
, node
.reply_error
);
248 ctdb_reply_error(ctdb
, hdr
);
251 case CTDB_REQ_DMASTER
:
252 CTDB_INCREMENT_STAT(ctdb
, node
.req_dmaster
);
253 ctdb_request_dmaster(ctdb
, hdr
);
256 case CTDB_REPLY_DMASTER
:
257 CTDB_INCREMENT_STAT(ctdb
, node
.reply_dmaster
);
258 ctdb_reply_dmaster(ctdb
, hdr
);
261 case CTDB_REQ_MESSAGE
:
262 CTDB_INCREMENT_STAT(ctdb
, node
.req_message
);
263 ctdb_request_message(ctdb
, hdr
);
266 case CTDB_REQ_CONTROL
:
267 CTDB_INCREMENT_STAT(ctdb
, node
.req_control
);
268 ctdb_request_control(ctdb
, hdr
);
271 case CTDB_REPLY_CONTROL
:
272 CTDB_INCREMENT_STAT(ctdb
, node
.reply_control
);
273 ctdb_reply_control(ctdb
, hdr
);
276 case CTDB_REQ_KEEPALIVE
:
277 CTDB_INCREMENT_STAT(ctdb
, keepalive_packets_recv
);
281 DEBUG(DEBUG_CRIT
,("%s: Packet with unknown operation %u\n",
282 __location__
, hdr
->operation
));
287 talloc_free(tmp_ctx
);
292 called by the transport layer when a node is dead
294 void ctdb_node_dead(struct ctdb_node
*node
)
296 if (node
->flags
& NODE_FLAGS_DISCONNECTED
) {
297 DEBUG(DEBUG_INFO
,("%s: node %s is already marked disconnected: %u connected\n",
298 node
->ctdb
->name
, node
->name
,
299 node
->ctdb
->num_connected
));
302 node
->ctdb
->num_connected
--;
303 node
->flags
|= NODE_FLAGS_DISCONNECTED
| NODE_FLAGS_UNHEALTHY
;
305 node
->dead_count
= 0;
307 DEBUG(DEBUG_NOTICE
,("%s: node %s is dead: %u connected\n",
308 node
->ctdb
->name
, node
->name
, node
->ctdb
->num_connected
));
309 ctdb_daemon_cancel_controls(node
->ctdb
, node
);
311 if (node
->ctdb
->methods
== NULL
) {
312 DEBUG(DEBUG_ERR
,(__location__
" Can not restart transport while shutting down daemon.\n"));
316 node
->ctdb
->methods
->restart(node
);
320 called by the transport layer when a node is connected
322 void ctdb_node_connected(struct ctdb_node
*node
)
324 if (!(node
->flags
& NODE_FLAGS_DISCONNECTED
)) {
325 DEBUG(DEBUG_INFO
,("%s: node %s is already marked connected: %u connected\n",
326 node
->ctdb
->name
, node
->name
,
327 node
->ctdb
->num_connected
));
330 node
->ctdb
->num_connected
++;
331 node
->dead_count
= 0;
332 node
->flags
&= ~NODE_FLAGS_DISCONNECTED
;
333 node
->flags
|= NODE_FLAGS_UNHEALTHY
;
335 ("%s: connected to %s - %u connected\n",
336 node
->ctdb
->name
, node
->name
, node
->ctdb
->num_connected
));
/* carrier for a packet deferred to the next event-loop iteration */
struct queue_next {
	struct ctdb_context *ctdb;
	struct ctdb_req_header *hdr;
};
346 triggered when a deferred packet is due
348 static void queue_next_trigger(struct tevent_context
*ev
,
349 struct tevent_timer
*te
,
350 struct timeval t
, void *private_data
)
352 struct queue_next
*q
= talloc_get_type(private_data
, struct queue_next
);
353 ctdb_input_pkt(q
->ctdb
, q
->hdr
);
358 defer a packet, so it is processed on the next event loop
359 this is used for sending packets to ourselves
361 static void ctdb_defer_packet(struct ctdb_context
*ctdb
, struct ctdb_req_header
*hdr
)
363 struct queue_next
*q
;
364 q
= talloc(ctdb
, struct queue_next
);
366 DEBUG(DEBUG_ERR
,(__location__
" Failed to allocate deferred packet\n"));
370 q
->hdr
= talloc_memdup(ctdb
, hdr
, hdr
->length
);
371 if (q
->hdr
== NULL
) {
372 DEBUG(DEBUG_ERR
,("Error copying deferred packet to self\n"));
376 /* use this to put packets directly into our recv function */
377 ctdb_input_pkt(q
->ctdb
, q
->hdr
);
379 tevent_add_timer(ctdb
->ev
, q
, timeval_zero(), queue_next_trigger
, q
);
385 broadcast a packet to all nodes
387 static void ctdb_broadcast_packet_all(struct ctdb_context
*ctdb
,
388 struct ctdb_req_header
*hdr
)
391 for (i
=0; i
< ctdb
->num_nodes
; i
++) {
392 if (ctdb
->nodes
[i
]->flags
& NODE_FLAGS_DELETED
) {
395 hdr
->destnode
= ctdb
->nodes
[i
]->pnn
;
396 ctdb_queue_packet(ctdb
, hdr
);
401 broadcast a packet to all nodes in the current vnnmap
403 static void ctdb_broadcast_packet_vnnmap(struct ctdb_context
*ctdb
,
404 struct ctdb_req_header
*hdr
)
407 for (i
=0;i
<ctdb
->vnn_map
->size
;i
++) {
408 hdr
->destnode
= ctdb
->vnn_map
->map
[i
];
409 ctdb_queue_packet(ctdb
, hdr
);
414 broadcast a packet to all connected nodes
416 static void ctdb_broadcast_packet_connected(struct ctdb_context
*ctdb
,
417 struct ctdb_req_header
*hdr
)
420 for (i
=0; i
< ctdb
->num_nodes
; i
++) {
421 if (ctdb
->nodes
[i
]->flags
& NODE_FLAGS_DELETED
) {
424 if (!(ctdb
->nodes
[i
]->flags
& NODE_FLAGS_DISCONNECTED
)) {
425 hdr
->destnode
= ctdb
->nodes
[i
]->pnn
;
426 ctdb_queue_packet(ctdb
, hdr
);
432 queue a packet or die
434 void ctdb_queue_packet(struct ctdb_context
*ctdb
, struct ctdb_req_header
*hdr
)
436 struct ctdb_node
*node
;
438 switch (hdr
->destnode
) {
439 case CTDB_BROADCAST_ALL
:
440 ctdb_broadcast_packet_all(ctdb
, hdr
);
442 case CTDB_BROADCAST_VNNMAP
:
443 ctdb_broadcast_packet_vnnmap(ctdb
, hdr
);
445 case CTDB_BROADCAST_CONNECTED
:
446 ctdb_broadcast_packet_connected(ctdb
, hdr
);
450 CTDB_INCREMENT_STAT(ctdb
, node_packets_sent
);
452 if (!ctdb_validate_pnn(ctdb
, hdr
->destnode
)) {
453 DEBUG(DEBUG_CRIT
,(__location__
" cant send to node %u that does not exist\n",
458 node
= ctdb
->nodes
[hdr
->destnode
];
460 if (node
->flags
& NODE_FLAGS_DELETED
) {
461 DEBUG(DEBUG_ERR
, (__location__
" Can not queue packet to DELETED node %d\n", hdr
->destnode
));
465 if (node
->pnn
== ctdb
->pnn
) {
466 ctdb_defer_packet(ctdb
, hdr
);
470 if (ctdb
->methods
== NULL
) {
471 DEBUG(DEBUG_ALERT
, (__location__
" Can not queue packet. "
472 "Transport is DOWN\n"));
477 if (ctdb
->methods
->queue_pkt(node
, (uint8_t *)hdr
, hdr
->length
) != 0) {
478 ctdb_fatal(ctdb
, "Unable to queue packet\n");
/*
  a valgrind hack to allow us to get opcode specific backtraces
  very ugly, and relies on no compiler optimisation!
*/
void ctdb_queue_packet_opcode(struct ctdb_context *ctdb, struct ctdb_req_header *hdr, unsigned opcode)
{
	/* NOTE(review): upstream dispatches here through a long run of
	   DO_OP(<control opcode>) case labels — one per CTDB control —
	   so each opcode gets a distinct call site in valgrind
	   backtraces.  That case list was lost in this copy and should
	   be restored from upstream.  Every branch, including the
	   default, simply queues the packet, so behavior is preserved. */
#define DO_OP(x) case x: ctdb_queue_packet(ctdb, hdr); break
	switch (opcode) {
	default:
		ctdb_queue_packet(ctdb, hdr);
		break;
	}
#undef DO_OP
}