/*
   Copyright (C) Martin Schwenke & Amitay Isaacs, DataDirect Networks 2022

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/network.h"

#include <talloc.h>
#include <tevent.h>

#include "lib/util/blocking.h"
#include "lib/util/sys_rw.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/util.h"
#include "lib/util/smb_strtox.h"

#include "lib/async_req/async_sock.h"

#include "common/tmon.h"

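/*
 * Wire format: each message is a fixed 4-byte packet containing a
 * 2-byte message type followed by a 2-byte value, both in network
 * byte order (see tmon_packet_push()/tmon_packet_pull() below).
 */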
enum tmon_message_type {
        TMON_MSG_EXIT = 1,
        TMON_MSG_ERRNO,
        TMON_MSG_PING,
        TMON_MSG_ASCII,
        TMON_MSG_CUSTOM,
};

struct tmon_pkt {
        enum tmon_message_type type;
        uint16_t val;
};

struct tmon_buf {
        uint8_t data[4];
};

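/* Convert between struct tmon_pkt and its on-the-wire representation */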
static void tmon_packet_push(struct tmon_pkt *pkt,
                             struct tmon_buf *buf)
{
        uint16_t type_n, val_n;

        type_n = htons(pkt->type);
        val_n = htons(pkt->val);
        memcpy(&buf->data[0], &type_n, 2);
        memcpy(&buf->data[2], &val_n, 2);
}

static void tmon_packet_pull(struct tmon_buf *buf,
                             struct tmon_pkt *pkt)
{
        uint16_t type_n, val_n;

        memcpy(&type_n, &buf->data[0], 2);
        memcpy(&val_n, &buf->data[2], 2);

        pkt->type = ntohs(type_n);
        pkt->val = ntohs(val_n);
}

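/* Write a single packet to the fd; returns 0 on success or an errno value */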
static int tmon_packet_write(int fd, struct tmon_pkt *pkt)
{
        struct tmon_buf buf;
        ssize_t n;

        tmon_packet_push(pkt, &buf);

        n = sys_write(fd, &buf.data[0], sizeof(buf.data));
        if (n == -1) {
                return errno;
        }

        return 0;
}

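/*
 * tmon_set_*(): construct packets of each message type.  These return
 * false if the requested value cannot be represented (e.g. errno out
 * of range), true otherwise.
 */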
bool tmon_set_exit(struct tmon_pkt *pkt)
{
        *pkt = (struct tmon_pkt) {
                .type = TMON_MSG_EXIT,
        };

        return true;
}

bool tmon_set_errno(struct tmon_pkt *pkt, int err)
{
        if (err <= 0 || err > UINT16_MAX) {
                return false;
        }

        *pkt = (struct tmon_pkt) {
                .type = TMON_MSG_ERRNO,
                .val = (uint16_t)err,
        };

        return true;
}

bool tmon_set_ping(struct tmon_pkt *pkt)
{
        *pkt = (struct tmon_pkt) {
                .type = TMON_MSG_PING,
        };

        return true;
}

bool tmon_set_ascii(struct tmon_pkt *pkt, char c)
{
        if (!isascii(c)) {
                return false;
        }

        *pkt = (struct tmon_pkt) {
                .type = TMON_MSG_ASCII,
                .val = (uint16_t)c,
        };

        return true;
}

bool tmon_set_custom(struct tmon_pkt *pkt, uint16_t val)
{
        *pkt = (struct tmon_pkt) {
                .type = TMON_MSG_CUSTOM,
                .val = val,
        };

        return true;
}

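/*
 * tmon_parse_*(): check that a received packet has the expected type
 * and extract its value, returning false on mismatch or invalid data.
 */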
static bool tmon_parse_exit(struct tmon_pkt *pkt)
{
        if (pkt->type != TMON_MSG_EXIT) {
                return false;
        }
        if (pkt->val != 0) {
                return false;
        }

        return true;
}

static bool tmon_parse_errno(struct tmon_pkt *pkt, int *err)
{
        if (pkt->type != TMON_MSG_ERRNO) {
                return false;
        }
        *err = (int)pkt->val;

        return true;
}

bool tmon_parse_ping(struct tmon_pkt *pkt)
{
        if (pkt->type != TMON_MSG_PING) {
                return false;
        }
        if (pkt->val != 0) {
                return false;
        }

        return true;
}

bool tmon_parse_ascii(struct tmon_pkt *pkt, char *c)
{
        if (pkt->type != TMON_MSG_ASCII) {
                return false;
        }
        if (!isascii((int)pkt->val)) {
                return false;
        }
        *c = (char)pkt->val;

        return true;
}

bool tmon_parse_custom(struct tmon_pkt *pkt, uint16_t *val)
{
        if (pkt->type != TMON_MSG_CUSTOM) {
                return false;
        }
        *val = pkt->val;

        return true;
}

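/* Computation state for the tmon async (tevent) request */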
struct tmon_state {
        int fd;
        int direction;
        struct tevent_context *ev;
        unsigned long write_interval;
        unsigned long read_timeout;
        struct tmon_actions actions;
        struct tevent_timer *timer;
        void *private_data;
};

static void tmon_readable(struct tevent_req *subreq);
static bool tmon_set_timeout(struct tevent_req *req,
                             struct tevent_context *ev);
static void tmon_timedout(struct tevent_context *ev,
                          struct tevent_timer *te,
                          struct timeval current_time,
                          void *private_data);
static void tmon_write_loop(struct tevent_req *subreq);

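/*
 * Start monitoring fd.  Depending on direction and the supplied
 * actions, this waits for the fd to become readable, applies an
 * optional read timeout and runs an optional periodic write callback.
 * Invalid combinations (e.g. read actions on a write-only fd) fail
 * with EINVAL.
 */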
struct tevent_req *tmon_send(TALLOC_CTX *mem_ctx,
                             struct tevent_context *ev,
                             int fd,
                             int direction,
                             unsigned long read_timeout,
                             unsigned long write_interval,
                             struct tmon_actions *actions,
                             void *private_data)
{
        struct tevent_req *req, *subreq;
        struct tmon_state *state;
        bool status;

        req = tevent_req_create(mem_ctx, &state, struct tmon_state);
        if (req == NULL) {
                return NULL;
        }

        if (actions != NULL) {
                /* If FD isn't readable then read actions are invalid */
                if (!(direction & TMON_FD_READ) &&
                    (actions->timeout_callback != NULL ||
                     actions->read_callback != NULL ||
                     read_timeout != 0)) {
                        tevent_req_error(req, EINVAL);
                        return tevent_req_post(req, ev);
                }
                /* If FD isn't writeable then write actions are invalid */
                if (!(direction & TMON_FD_WRITE) &&
                    (actions->write_callback != NULL ||
                     write_interval != 0)) {
                        tevent_req_error(req, EINVAL);
                        return tevent_req_post(req, ev);
                }
                /* Can't specify write interval without a callback */
                if (write_interval != 0 &&
                    actions->write_callback == NULL) {
                        tevent_req_error(req, EINVAL);
                        return tevent_req_post(req, ev);
                }
        }

        state->fd = fd;
        state->direction = direction;
        state->ev = ev;
        state->write_interval = write_interval;
        state->read_timeout = read_timeout;
        state->private_data = private_data;

        if (actions != NULL) {
                state->actions = *actions;
        }

        status = set_close_on_exec(fd);
        if (!status) {
                tevent_req_error(req, errno);
                return tevent_req_post(req, ev);
        }

        if (direction & TMON_FD_READ) {
                subreq = wait_for_read_send(state, ev, fd, true);
                if (tevent_req_nomem(subreq, req)) {
                        return tevent_req_post(req, ev);
                }
                tevent_req_set_callback(subreq, tmon_readable, req);
        }

        if (state->read_timeout != 0) {
                status = tmon_set_timeout(req, state->ev);
                if (!status) {
                        tevent_req_error(req, ENOMEM);
                        return tevent_req_post(req, ev);
                }
        }

        if (state->write_interval != 0) {
                subreq = tevent_wakeup_send(
                        state,
                        state->ev,
                        tevent_timeval_current_ofs(state->write_interval, 0));
                if (tevent_req_nomem(subreq, req)) {
                        return tevent_req_post(req, state->ev);
                }
                tevent_req_set_callback(subreq, tmon_write_loop, req);
        }

        return req;
}

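/*
 * The fd is readable: read exactly one packet, handle EXIT/ERRNO
 * messages internally and pass anything else to the read callback,
 * then wait for the next packet and re-arm the read timeout.
 */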
static void tmon_readable(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct tmon_state *state = tevent_req_data(
                req, struct tmon_state);
        struct tmon_buf buf;
        struct tmon_pkt pkt;
        ssize_t nread;
        bool status;
        int err;
        int ret;

        status = wait_for_read_recv(subreq, &ret);
        TALLOC_FREE(subreq);
        if (!status) {
                if (ret == EPIPE && state->actions.close_callback != NULL) {
                        ret = state->actions.close_callback(state->private_data);
                        if (ret == TMON_STATUS_EXIT) {
                                ret = 0;
                        }
                }
                if (ret == 0) {
                        tevent_req_done(req);
                } else {
                        tevent_req_error(req, ret);
                }
                return;
        }

        nread = sys_read(state->fd, buf.data, sizeof(buf.data));
        if (nread == -1) {
                tevent_req_error(req, errno);
                return;
        }
        if (nread == 0) {
                /* Can't happen, treat like EPIPE, above */
                tevent_req_error(req, EPIPE);
                return;
        }
        if (nread != sizeof(buf.data)) {
                tevent_req_error(req, EPROTO);
                return;
        }

        tmon_packet_pull(&buf, &pkt);

        switch (pkt.type) {
        case TMON_MSG_EXIT:
                status = tmon_parse_exit(&pkt);
                if (!status) {
                        tevent_req_error(req, EPROTO);
                        return;
                }
                tevent_req_done(req);
                return;
        case TMON_MSG_ERRNO:
                status = tmon_parse_errno(&pkt, &err);
                if (!status) {
                        err = EPROTO;
                }
                tevent_req_error(req, err);
                return;
        default:
                break;
        }

        if (state->actions.read_callback == NULL) {
                /* Shouldn't happen, other end should not write */
                tevent_req_error(req, EIO);
                return;
        }
        ret = state->actions.read_callback(state->private_data, &pkt);
        if (ret == TMON_STATUS_EXIT) {
                tevent_req_done(req);
                return;
        }
        if (ret != 0) {
                tevent_req_error(req, ret);
                return;
        }

        subreq = wait_for_read_send(state, state->ev, state->fd, true);
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq, tmon_readable, req);

        /* Reset read timeout */
        if (state->read_timeout != 0) {
                status = tmon_set_timeout(req, state->ev);
                if (!status) {
                        tevent_req_error(req, ENOMEM);
                        return;
                }
        }
}

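/* (Re)arm the read timeout timer */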
static bool tmon_set_timeout(struct tevent_req *req,
                             struct tevent_context *ev)
{
        struct tmon_state *state = tevent_req_data(
                req, struct tmon_state);
        struct timeval endtime =
                tevent_timeval_current_ofs(state->read_timeout, 0);

        TALLOC_FREE(state->timer);

        state->timer = tevent_add_timer(ev, req, endtime, tmon_timedout, req);
        if (tevent_req_nomem(state->timer, req)) {
                return false;
        }

        return true;
}

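/*
 * Read timeout expired: give the timeout callback, if any, a chance to
 * decide the outcome; otherwise the request fails.
 */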
static void tmon_timedout(struct tevent_context *ev,
                          struct tevent_timer *te,
                          struct timeval current_time,
                          void *private_data)
{
        struct tevent_req *req = talloc_get_type_abort(
                private_data, struct tevent_req);
        struct tmon_state *state = tevent_req_data(req, struct tmon_state);
        int ret;

        TALLOC_FREE(state->timer);

        if (state->actions.timeout_callback != NULL) {
                ret = state->actions.timeout_callback(state->private_data);
                if (ret == TMON_STATUS_EXIT) {
                        ret = 0;
                }
        } else {
                ret = ETIMEDOUT;
        }

        if (ret == 0) {
                tevent_req_done(req);
        } else {
                tevent_req_error(req, ret);
        }
}

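/*
 * Periodic write: ask the write callback for a packet, send it (unless
 * the callback returns TMON_STATUS_SKIP) and schedule the next wakeup.
 */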
static void tmon_write_loop(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct tmon_state *state = tevent_req_data(
                req, struct tmon_state);
        struct tmon_pkt pkt;
        bool status;
        int ret;

        status = tevent_wakeup_recv(subreq);
        TALLOC_FREE(subreq);
        if (!status) {
                /* Ignore error */
        }

        ret = state->actions.write_callback(state->private_data, &pkt);
        if (ret == TMON_STATUS_EXIT) {
                tevent_req_done(req);
                return;
        }
        if (ret == TMON_STATUS_SKIP) {
                goto done;
        }
        if (ret != 0) {
                tevent_req_error(req, ret);
                return;
        }

        status = tmon_write(req, &pkt);
        if (!status) {
                return;
        }

done:
        subreq = tevent_wakeup_send(
                state,
                state->ev,
                tevent_timeval_current_ofs(state->write_interval, 0));
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
        tevent_req_set_callback(subreq, tmon_write_loop, req);
}

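/*
 * Send a single packet to the monitored fd.  On failure the request is
 * completed (via the close callback, if registered) and false is
 * returned.
 */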
bool tmon_write(struct tevent_req *req, struct tmon_pkt *pkt)
{
        struct tmon_state *state = tevent_req_data(
                req, struct tmon_state);
        int ret;

        if (state->fd == -1) {
                return false;
        }

        if (!(state->direction & TMON_FD_WRITE)) {
                tevent_req_error(req, EINVAL);
                return false;
        }

        ret = tmon_packet_write(state->fd, pkt);
        if (ret != 0) {
                if (ret == EPIPE && state->actions.close_callback != NULL) {
                        ret = state->actions.close_callback(state->private_data);
                        if (ret == TMON_STATUS_EXIT) {
                                ret = 0;
                        }
                }
                if (ret == 0) {
                        tevent_req_done(req);
                } else {
                        tevent_req_error(req, ret);
                }
                state->fd = -1;
                return false;
        }

        return true;
}

bool tmon_recv(struct tevent_req *req, int *perr)
{
        if (tevent_req_is_unix_error(req, perr)) {
                return false;
        }

        return true;
}

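/* Callbacks implementing a simple keepalive ping protocol on top of tmon */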
static int ping_writer(void *private_data, struct tmon_pkt *pkt)
{
        tmon_set_ping(pkt);

        return 0;
}

static int ping_reader(void *private_data, struct tmon_pkt *pkt)
{
        bool status;

        /* Only expect pings */
        status = tmon_parse_ping(pkt);
        if (!status) {
                return EPROTO;
        }

        return 0;
}

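/*
 * Convenience wrapper: monitor fd by exchanging pings.  A writer sends
 * a ping every "interval" seconds and a reader fails if no packet
 * arrives within "timeout" seconds.  Illustrative sketch of a caller
 * (names are hypothetical, not taken from this file):
 *
 *	subreq = tmon_ping_send(state, ev, fd,
 *				TMON_FD_READ | TMON_FD_WRITE,
 *				10, 5);
 *	if (tevent_req_nomem(subreq, req)) {
 *		return;
 *	}
 *	tevent_req_set_callback(subreq, example_ping_done, req);
 *
 * where example_ping_done() calls tmon_ping_recv(subreq, &err).
 */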
struct tevent_req *tmon_ping_send(TALLOC_CTX *mem_ctx,
                                  struct tevent_context *ev,
                                  int fd,
                                  int direction,
                                  unsigned long timeout,
                                  unsigned long interval)
{
        struct tevent_req *req;
        struct tmon_actions actions = {
                .write_callback = NULL,
        };

        if ((direction & TMON_FD_WRITE) && interval != 0) {
                actions.write_callback = ping_writer;
        }
        if ((direction & TMON_FD_READ) && timeout != 0) {
                actions.read_callback = ping_reader;
        }

        req = tmon_send(mem_ctx,
                        ev,
                        fd,
                        direction,
                        timeout,
                        interval,
                        &actions,
                        NULL);

        return req;
}

bool tmon_ping_recv(struct tevent_req *req, int *perr)
{
        bool status;

        status = tmon_recv(req, perr);

        return status;
}