/*
   ctdb main protocol code

   Copyright (C) Andrew Tridgell  2006

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
21 #include "system/network.h"
22 #include "system/filesys.h"
27 #include "lib/util/dlinklist.h"
28 #include "lib/util/debug.h"
29 #include "lib/util/samba_util.h"
31 #include "ctdb_private.h"
32 #include "ctdb_client.h"
34 #include "common/common.h"
35 #include "common/logging.h"
38 choose the transport we will use
40 int ctdb_set_transport(struct ctdb_context
*ctdb
, const char *transport
)
42 ctdb
->transport
= talloc_strdup(ctdb
, transport
);
43 CTDB_NO_MEMORY(ctdb
, ctdb
->transport
);
49 Check whether an ip is a valid node ip
50 Returns the node id for this ip address or -1
52 int ctdb_ip_to_nodeid(struct ctdb_context
*ctdb
, const ctdb_sock_addr
*nodeip
)
56 for (nodeid
=0;nodeid
<ctdb
->num_nodes
;nodeid
++) {
57 if (ctdb
->nodes
[nodeid
]->flags
& NODE_FLAGS_DELETED
) {
60 if (ctdb_same_ip(&ctdb
->nodes
[nodeid
]->address
, nodeip
)) {
/* Load a nodes list file into a nodes array.
 *
 * Builds one ctdb_node per node_map entry (one slot per entry, including
 * DELETED placeholders so PNN == array index holds).  Returns 0 on success;
 * CTDB_NO_MEMORY makes it return -1 on allocation failure.
 */
static int convert_node_map_to_list(struct ctdb_context *ctdb,
				    TALLOC_CTX *mem_ctx,
				    struct ctdb_node_map_old *node_map,
				    struct ctdb_node ***nodes,
				    uint32_t *num_nodes)
{
	unsigned int i;

	*nodes = talloc_zero_array(mem_ctx,
				   struct ctdb_node *, node_map->num);
	CTDB_NO_MEMORY(ctdb, *nodes);
	*num_nodes = node_map->num;

	for (i = 0; i < node_map->num; i++) {
		struct ctdb_node *node;

		node = talloc_zero(*nodes, struct ctdb_node);
		CTDB_NO_MEMORY(ctdb, node);
		(*nodes)[i] = node;

		node->address = node_map->nodes[i].addr;
		/* "ip:port" display name used in log messages */
		node->name = talloc_asprintf(node, "%s:%u",
					     ctdb_addr_to_str(&node->address),
					     ctdb_addr_to_port(&node->address));

		node->flags = node_map->nodes[i].flags;
		if (!(node->flags & NODE_FLAGS_DELETED)) {
			/* non-deleted nodes start out unhealthy ... */
			node->flags = NODE_FLAGS_UNHEALTHY;
		}
		/* ... and every node starts disconnected until the
		   transport reports it up */
		node->flags |= NODE_FLAGS_DISCONNECTED;

		node->ctdb = ctdb;
		node->dead_count = 0;
	}

	return 0;
}
/* Load the nodes list from a file.
 *
 * Reads ctdb->nodes_file, replaces ctdb->nodes/num_nodes wholesale and
 * frees the intermediate map.  Any failure is fatal: the daemon cannot
 * run without a valid nodes list, so we log and exit.
 */
void ctdb_load_nodes_file(struct ctdb_context *ctdb)
{
	struct ctdb_node_map_old *node_map;
	int ret;

	node_map = ctdb_read_nodes_file(ctdb, ctdb->nodes_file);
	if (node_map == NULL) {
		goto fail;
	}

	/* drop any previously loaded list before rebuilding it */
	TALLOC_FREE(ctdb->nodes);
	ret = convert_node_map_to_list(ctdb, ctdb, node_map,
				       &ctdb->nodes, &ctdb->num_nodes);
	if (ret == -1) {
		goto fail;
	}

	talloc_free(node_map);
	return;

fail:
	DEBUG(DEBUG_ERR, ("Failed to load nodes file \"%s\"\n",
			  ctdb->nodes_file));
	/* talloc_free(NULL) is a no-op, safe on the read-failure path */
	talloc_free(node_map);
	exit(1);
}
137 setup the local node address
139 int ctdb_set_address(struct ctdb_context
*ctdb
, const char *address
)
141 ctdb
->address
= talloc(ctdb
, ctdb_sock_addr
);
142 CTDB_NO_MEMORY(ctdb
, ctdb
->address
);
144 if (ctdb_parse_address(ctdb
, address
, ctdb
->address
) != 0) {
148 ctdb
->name
= talloc_asprintf(ctdb
, "%s:%u",
149 ctdb_addr_to_str(ctdb
->address
),
150 ctdb_addr_to_port(ctdb
->address
));
156 return the number of active nodes
158 uint32_t ctdb_get_num_active_nodes(struct ctdb_context
*ctdb
)
162 for (i
=0; i
< ctdb
->num_nodes
; i
++) {
163 if (!(ctdb
->nodes
[i
]->flags
& NODE_FLAGS_INACTIVE
)) {
/*
  called when we need to process a packet. This can be a requeued packet
  after a lockwait, or a real packet from another node
*/
void ctdb_input_pkt(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
{
	TALLOC_CTX *tmp_ctx;

	/* place the packet as a child of the tmp_ctx. We then use
	   talloc_free() below to free it. If any of the calls want
	   to keep it, then they will steal it somewhere else, and the
	   talloc_free() will only free the tmp_ctx */
	tmp_ctx = talloc_new(ctdb);
	talloc_steal(tmp_ctx, hdr);

	DEBUG(DEBUG_DEBUG,(__location__ " ctdb request %u of type %u length %u from "
		 "node %u to %u\n", hdr->reqid, hdr->operation, hdr->length,
		 hdr->srcnode, hdr->destnode));

	/* first switch: gate the database-call operations only; the
	   control/message/keepalive operations below are always allowed */
	switch (hdr->operation) {
	case CTDB_REQ_CALL:
	case CTDB_REPLY_CALL:
	case CTDB_REQ_DMASTER:
	case CTDB_REPLY_DMASTER:
		/* we don't allow these calls when banned */
		if (ctdb->nodes[ctdb->pnn]->flags & NODE_FLAGS_BANNED) {
			DEBUG(DEBUG_DEBUG,(__location__ " ctdb operation %u"
				" request %u"
				" length %u from node %u to %u while node"
				" is banned\n",
				hdr->operation, hdr->reqid,
				hdr->length,
				hdr->srcnode, hdr->destnode));
			goto done;
		}

		/* for ctdb_call inter-node operations verify that the
		   remote node that sent us the call is running in the
		   same generation instance as this node
		*/
		if (ctdb->vnn_map->generation != hdr->generation) {
			DEBUG(DEBUG_DEBUG,(__location__ " ctdb operation %u"
				" request %u"
				" length %u from node %u to %u had an"
				" invalid generation id:%u while our"
				" generation id is:%u\n",
				hdr->operation, hdr->reqid,
				hdr->length,
				hdr->srcnode, hdr->destnode,
				hdr->generation, ctdb->vnn_map->generation));
			goto done;
		}
	}

	/* second switch: dispatch to the per-operation handler, bumping
	   the matching statistics counter first */
	switch (hdr->operation) {
	case CTDB_REQ_CALL:
		CTDB_INCREMENT_STAT(ctdb, node.req_call);
		ctdb_request_call(ctdb, hdr);
		break;

	case CTDB_REPLY_CALL:
		CTDB_INCREMENT_STAT(ctdb, node.reply_call);
		ctdb_reply_call(ctdb, hdr);
		break;

	case CTDB_REPLY_ERROR:
		CTDB_INCREMENT_STAT(ctdb, node.reply_error);
		ctdb_reply_error(ctdb, hdr);
		break;

	case CTDB_REQ_DMASTER:
		CTDB_INCREMENT_STAT(ctdb, node.req_dmaster);
		ctdb_request_dmaster(ctdb, hdr);
		break;

	case CTDB_REPLY_DMASTER:
		CTDB_INCREMENT_STAT(ctdb, node.reply_dmaster);
		ctdb_reply_dmaster(ctdb, hdr);
		break;

	case CTDB_REQ_MESSAGE:
		CTDB_INCREMENT_STAT(ctdb, node.req_message);
		ctdb_request_message(ctdb, hdr);
		break;

	case CTDB_REQ_CONTROL:
		CTDB_INCREMENT_STAT(ctdb, node.req_control);
		ctdb_request_control(ctdb, hdr);
		break;

	case CTDB_REPLY_CONTROL:
		CTDB_INCREMENT_STAT(ctdb, node.reply_control);
		ctdb_reply_control(ctdb, hdr);
		break;

	case CTDB_REQ_KEEPALIVE:
		CTDB_INCREMENT_STAT(ctdb, keepalive_packets_recv);
		ctdb_request_keepalive(ctdb, hdr);
		break;

	case CTDB_REQ_TUNNEL:
		CTDB_INCREMENT_STAT(ctdb, node.req_tunnel);
		ctdb_request_tunnel(ctdb, hdr);
		break;

	default:
		DEBUG(DEBUG_CRIT,("%s: Packet with unknown operation %u\n",
			 __location__, hdr->operation));
		break;
	}

done:
	/* frees hdr too, unless a handler talloc_steal()ed it */
	talloc_free(tmp_ctx);
}
/*
  called by the transport layer when a node is dead
*/
void ctdb_node_dead(struct ctdb_node *node)
{
	/* idempotent: a node already marked disconnected is left alone,
	   so num_connected is never decremented twice for one node */
	if (node->flags & NODE_FLAGS_DISCONNECTED) {
		DEBUG(DEBUG_INFO,("%s: node %s is already marked disconnected: %u connected\n",
			 node->ctdb->name, node->name,
			 node->ctdb->num_connected));
		return;
	}
	node->ctdb->num_connected--;
	node->flags |= NODE_FLAGS_DISCONNECTED | NODE_FLAGS_UNHEALTHY;
	node->dead_count = 0;

	DEBUG(DEBUG_ERR,("%s: node %s is dead: %u connected\n",
		 node->ctdb->name, node->name, node->ctdb->num_connected));
	/* fail any controls still outstanding against the dead node */
	ctdb_daemon_cancel_controls(node->ctdb, node);

	/* methods is NULL once the daemon is shutting down */
	if (node->ctdb->methods == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Can not restart transport while shutting down daemon.\n"));
		return;
	}

	/* ask the transport to start reconnecting to this node */
	node->ctdb->methods->restart(node);
}
/*
  called by the transport layer when a node is connected
*/
void ctdb_node_connected(struct ctdb_node *node)
{
	/* idempotent: ignore duplicate connect notifications so
	   num_connected is never incremented twice for one node */
	if (!(node->flags & NODE_FLAGS_DISCONNECTED)) {
		DEBUG(DEBUG_INFO,("%s: node %s is already marked connected: %u connected\n",
			 node->ctdb->name, node->name,
			 node->ctdb->num_connected));
		return;
	}
	node->ctdb->num_connected++;
	node->dead_count = 0;
	node->flags &= ~NODE_FLAGS_DISCONNECTED;
	/* connected but not yet trusted healthy — presumably cleared later
	   by health monitoring; verify against the monitor code */
	node->flags |= NODE_FLAGS_UNHEALTHY;
	DEBUG(DEBUG_ERR,
	      ("%s: connected to %s - %u connected\n",
	       node->ctdb->name, node->name, node->ctdb->num_connected));
}
/* state for a packet deferred to the next event loop iteration
   (see ctdb_defer_packet / queue_next_trigger) */
struct queue_next {
	struct ctdb_context *ctdb;
	struct ctdb_req_header *hdr;	/* private copy of the packet */
};
342 triggered when a deferred packet is due
344 static void queue_next_trigger(struct tevent_context
*ev
,
345 struct tevent_timer
*te
,
346 struct timeval t
, void *private_data
)
348 struct queue_next
*q
= talloc_get_type(private_data
, struct queue_next
);
349 ctdb_input_pkt(q
->ctdb
, q
->hdr
);
/*
  defer a packet, so it is processed on the next event loop
  this is used for sending packets to ourselves
*/
static void ctdb_defer_packet(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
{
	struct queue_next *q;
	q = talloc(ctdb, struct queue_next);
	if (q == NULL) {
		/* best-effort: the packet is silently dropped on OOM */
		DEBUG(DEBUG_ERR,(__location__ " Failed to allocate deferred packet\n"));
		return;
	}
	q->ctdb = ctdb;
	/* take a private copy: the caller's buffer is not valid by the
	   time the timer fires */
	q->hdr = talloc_memdup(ctdb, hdr, hdr->length);
	if (q->hdr == NULL) {
		talloc_free(q);
		DEBUG(DEBUG_ERR,("Error copying deferred packet to self\n"));
		return;
	}
#if 0
	/* use this to put packets directly into our recv function */
	ctdb_input_pkt(q->ctdb, q->hdr);
#else
	/* zero timeout: deliver on the next event loop iteration */
	tevent_add_timer(ctdb->ev, q, timeval_zero(), queue_next_trigger, q);
#endif
}
381 broadcast a packet to all nodes
383 static void ctdb_broadcast_packet_all(struct ctdb_context
*ctdb
,
384 struct ctdb_req_header
*hdr
)
387 for (i
=0; i
< ctdb
->num_nodes
; i
++) {
388 if (ctdb
->nodes
[i
]->flags
& NODE_FLAGS_DELETED
) {
391 hdr
->destnode
= ctdb
->nodes
[i
]->pnn
;
392 ctdb_queue_packet(ctdb
, hdr
);
397 broadcast a packet to all nodes in the current vnnmap
399 static void ctdb_broadcast_packet_vnnmap(struct ctdb_context
*ctdb
,
400 struct ctdb_req_header
*hdr
)
403 for (i
=0;i
<ctdb
->vnn_map
->size
;i
++) {
404 hdr
->destnode
= ctdb
->vnn_map
->map
[i
];
405 ctdb_queue_packet(ctdb
, hdr
);
410 broadcast a packet to all connected nodes
412 static void ctdb_broadcast_packet_connected(struct ctdb_context
*ctdb
,
413 struct ctdb_req_header
*hdr
)
416 for (i
=0; i
< ctdb
->num_nodes
; i
++) {
417 if (ctdb
->nodes
[i
]->flags
& NODE_FLAGS_DELETED
) {
420 if (!(ctdb
->nodes
[i
]->flags
& NODE_FLAGS_DISCONNECTED
)) {
421 hdr
->destnode
= ctdb
->nodes
[i
]->pnn
;
422 ctdb_queue_packet(ctdb
, hdr
);
/*
  queue a packet or die
*/
void ctdb_queue_packet(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
{
	struct ctdb_node *node;

	/* broadcast destinations fan out through the helpers above,
	   which call back into this function with concrete pnns */
	switch (hdr->destnode) {
	case CTDB_BROADCAST_ALL:
		ctdb_broadcast_packet_all(ctdb, hdr);
		return;
	case CTDB_BROADCAST_VNNMAP:
		ctdb_broadcast_packet_vnnmap(ctdb, hdr);
		return;
	case CTDB_BROADCAST_CONNECTED:
		ctdb_broadcast_packet_connected(ctdb, hdr);
		return;
	}

	CTDB_INCREMENT_STAT(ctdb, node_packets_sent);

	if (!ctdb_validate_pnn(ctdb, hdr->destnode)) {
		DEBUG(DEBUG_CRIT,(__location__ " cant send to node %u that does not exist\n",
			 hdr->destnode));
		return;
	}

	node = ctdb->nodes[hdr->destnode];

	if (node->flags & NODE_FLAGS_DELETED) {
		DEBUG(DEBUG_ERR, (__location__ " Can not queue packet to DELETED node %d\n", hdr->destnode));
		return;
	}

	/* packets to ourselves bypass the transport and are re-injected
	   on the next event loop iteration */
	if (node->pnn == ctdb->pnn) {
		ctdb_defer_packet(ctdb, hdr);
		return;
	}

	/* methods is NULL once the daemon is shutting down */
	if (ctdb->methods == NULL) {
		DEBUG(DEBUG_ALERT, (__location__ " Can not queue packet. "
				    "Transport is DOWN\n"));
		return;
	}

	/* transport queue failure is unrecoverable — abort the daemon */
	if (ctdb->methods->queue_pkt(node, (uint8_t *)hdr, hdr->length) != 0) {
		ctdb_fatal(ctdb, "Unable to queue packet\n");
	}
}
482 a valgrind hack to allow us to get opcode specific backtraces
483 very ugly, and relies on no compiler optimisation!
485 void ctdb_queue_packet_opcode(struct ctdb_context
*ctdb
, struct ctdb_req_header
*hdr
, unsigned opcode
)
488 #define DO_OP(x) case x: ctdb_queue_packet(ctdb, hdr); break
590 ctdb_queue_packet(ctdb
, hdr
);