/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * etm_xport_api_dd.c	FMA ETM-to-Transport API implementation
 *
 * library for establishing connections and transporting FMA events
 * between ETMs (event transport modules) in separate fault domains,
 * ie, between domain and service processor in same chassis, using
 * a character device driver based transport
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * --------------------------------- includes --------------------------------
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

#include <sys/fm/protocol.h>
#include <fm/fmd_api.h>

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include "etm_xport_api.h"
#include "etm_etm_proto.h"
/*
 * ----------------------- private consts and defns --------------------------
 */

/* magic numbers (32 bits) for transport address and connection handle */

#define	ETM_XPORT_DD_MAGIC_ADDR	(0x45544D41)	/* "ETMA" in ASCII */
#define	ETM_XPORT_DD_MAGIC_CONN	(0x45544D43)	/* "ETMC" in ASCII */

/* flags to use in opening transport device */

#define	ETM_XPORT_OPEN_FLAGS	(O_RDWR | O_NOCTTY)
82 * transport address and connection handle structures overload fn and fd
83 * fields to include state information:
85 * fn file name NULL means unused or closed
86 * fd file descriptor -1 means unused or closed
89 typedef struct _etm_xport_addr
{
90 uint32_t magic_num
; /* magic number */
91 char *fn
; /* fullpath to device node */
94 typedef struct _etm_xport_conn
{
95 uint32_t magic_num
; /* magic number */
96 int fd
; /* open dev file descriptor */
97 _etm_xport_addr_t
*addr
; /* associated transport addr */
/*
 * filename of device node to reach SP from domain. one of these two
 * device nodes will be used:
 *	ETM_XPORT_DEV_FN_SP - the Ontario glvc
 *	ETM_XPORT_DEV_VLDC  - the more recent LDOMS 1.0 (a.k.a. Ontario+) vldc
 * When the latter is in use, use_vldc is set to 1.
 *
 * filenames of device nodes to reach domains from SP
 * are NA because SP runs ALOM vs Solaris or Linux
 * and ETM is for Unix based OSes
 */

#define	ETM_XPORT_DEV_FN_SP	"/dev/spfma"

#define	ETM_XPORT_DEV_VLDC \
	"/devices/virtual-devices@100/channel-devices@200" \
	"/virtual-channel-client@2:spfma"
/*
 * -------------------------- global variables -------------------------------
 */

/* nonzero when the vldc (LDOMS) device node is in use instead of glvc */
static int use_vldc = 0;
123 static struct stats
{
125 /* address handle failures */
127 fmd_stat_t xport_addr_magicnum_bad
;
128 fmd_stat_t xport_addr_fn_bad
;
130 /* connection handle failures */
132 fmd_stat_t xport_conn_magicnum_bad
;
133 fmd_stat_t xport_conn_fd_bad
;
135 /* internal read/peek failures */
137 fmd_stat_t xport_buffread_badargs
;
138 fmd_stat_t xport_rawpeek_badargs
;
140 /* xport API failures */
142 fmd_stat_t xport_accept_badargs
;
143 fmd_stat_t xport_get_addr_conn_badargs
;
144 fmd_stat_t xport_free_addr_badargs
;
145 fmd_stat_t xport_free_addrv_badargs
;
146 fmd_stat_t xport_get_any_lcc_badargs
;
148 /* system and library failures */
150 fmd_stat_t xport_os_open_fail
;
151 fmd_stat_t xport_os_close_fail
;
152 fmd_stat_t xport_os_read_fail
;
153 fmd_stat_t xport_os_write_fail
;
154 fmd_stat_t xport_os_peek_fail
;
155 fmd_stat_t xport_os_ioctl_fail
;
157 } etm_xport_stats
= {
159 /* address handle failures */
161 { "xport_addr_magicnum_bad", FMD_TYPE_UINT64
,
162 "invalid address handle magic number" },
163 { "xport_addr_fn_bad", FMD_TYPE_UINT64
,
164 "invalid address handle file name" },
166 /* connection handle failures */
168 { "xport_conn_magicnum_bad", FMD_TYPE_UINT64
,
169 "invalid connection handle magic number" },
170 { "xport_conn_fd_bad", FMD_TYPE_UINT64
,
171 "invalid connection handle file descriptor" },
173 /* internal read/peek failures */
175 { "xport_buffread_badargs", FMD_TYPE_UINT64
,
176 "bad arguments in etm_xport_buffered_read" },
177 { "xport_rawpeek_badargs", FMD_TYPE_UINT64
,
178 "bad arguments in etm_xport_raw_peek" },
180 /* xport API failures */
182 { "xport_accept_badargs", FMD_TYPE_UINT64
,
183 "bad arguments in etm_xport_accept" },
184 { "xport_get_addr_conn_badargs", FMD_TYPE_UINT64
,
185 "bad arguments in etm_xport_get_addr_conn" },
186 { "xport_free_addr_badargs", FMD_TYPE_UINT64
,
187 "bad arguments in etm_xport_free_addr" },
188 { "xport_free_addrv_badargs", FMD_TYPE_UINT64
,
189 "bad arguments in etm_xport_free_addrv" },
190 { "xport_get_any_lcc_badargs", FMD_TYPE_UINT64
,
191 "bad arguments in etm_xport_get_any_lcc" },
193 /* system and library failures */
195 { "xport_os_open_fail", FMD_TYPE_UINT64
,
196 "open system call failures" },
197 { "xport_os_close_fail", FMD_TYPE_UINT64
,
198 "close system call failures" },
199 { "xport_os_read_fail", FMD_TYPE_UINT64
,
200 "read system call failures" },
201 { "xport_os_write_fail", FMD_TYPE_UINT64
,
202 "write system call failures" },
203 { "xport_os_peek_fail", FMD_TYPE_UINT64
,
204 "peek (ioctl) failures" },
205 { "xport_os_ioctl_fail", FMD_TYPE_UINT64
,
206 "ioctl system call failures" }
209 /* intermediate read buffer to [partially] emulate byte stream semantics */
211 static uint8_t *etm_xport_irb_area
= NULL
; /* buffered read area */
212 static uint8_t *etm_xport_irb_head
= NULL
; /* read head (dequeue) */
213 static uint8_t *etm_xport_irb_tail
= NULL
; /* read tail (enqueue) */
214 static size_t etm_xport_irb_mtu_sz
= 0; /* MTU size (in bytes) */
217 * -------------------------- private variables ------------------------------
220 static _etm_xport_conn_t
*
221 etm_xport_vldc_conn
= NULL
; /* single connection handle for VLDC */
223 static pthread_mutex_t
224 etm_xport_vldc_lock
= PTHREAD_MUTEX_INITIALIZER
;
225 /* lock for open()/close() VLDC */
228 etm_xport_debug_lvl
= 0; /* debug level: 0 off, 1 on, 2 more, ... */
231 etm_xport_addrs
= ""; /* spec str for transport addrs to use */
234 etm_xport_should_fake_dd
= 0; /* bool for whether to fake device driver */
237 * -------------------------- private functions ------------------------------
241 * etm_fake_ioctl - fake/simulate transport driver's ioctl() behavior
242 * [for unit testing with device driver absent or
243 * for alternative directory entry based transports],
244 * return 0 for success
245 * or -1 and set errno
247 * simulation may be incomplete, especially wrt peek()
249 * Design_Note: To avoid interfering with FMD's signal mask (SIGALRM)
250 * do not use [Solaris] sleep(3C) and instead use
251 * pthread_cond_wait() or nanosleep(), both of which
252 * are POSIX spec-ed to leave signal masks alone.
253 * This is needed for Solaris and Linux (domain and SP).
257 etm_fake_ioctl(int fd
, int op
, void *buf
)
259 int rv
; /* ret val */
260 etm_xport_opt_op_t
*op_ctl_ptr
; /* ptr for option ops */
261 etm_xport_msg_peek_t
*peek_ctl_ptr
; /* ptr for peeking */
262 struct stat stat_buf
; /* file stat struct */
263 ssize_t n
; /* gen use */
264 struct timespec tms
; /* for nanosleep() */
269 rv
= 0; /* default is success */
271 if (op
== ETM_XPORT_IOCTL_DATA_PEEK
) {
273 /* sleep until some data avail, potentially forever */
275 if (fstat(fd
, &stat_buf
) < 0) {
279 if (stat_buf
.st_size
> 0) {
280 n
= MIN(peek_ctl_ptr
->pk_buflen
,
282 peek_ctl_ptr
->pk_buflen
= n
;
283 /* return bogus data assuming content unused */
284 (void) memset(peek_ctl_ptr
->pk_buf
, 0xA5, n
);
287 tms
.tv_sec
= ETM_SLEEP_QUIK
;
289 if ((n
= nanosleep(&tms
, NULL
)) < 0) {
293 } /* forever awaiting data */
294 } else if (op
== ETM_XPORT_IOCTL_OPT_OP
) {
296 /* default near MTU_SZ gets and agree with everything else */
297 if ((op_ctl_ptr
->oo_op
== ETM_XPORT_OPT_GET
) &&
298 (op_ctl_ptr
->oo_opt
== ETM_XPORT_OPT_MTU_SZ
)) {
299 op_ctl_ptr
->oo_val
= 7 * ETM_XPORT_MTU_SZ_DEF
/ 8;
302 } /* whether ioctl op is handled */
311 } /* etm_fake_ioctl() */
314 * etm_xport_get_fn - return a cached read-only copy
315 * of the device node name to use
316 * for the given I/O operation
320 etm_xport_get_fn(fmd_hdl_t
*hdl
, int io_op
)
322 static char fn_wr
[PATH_MAX
] = {0}; /* fn for write */
323 static char fn_rd
[PATH_MAX
] = {0}; /* fn for read/peek */
324 char *rv
; /* ret val */
325 char *prop_str
; /* property string */
326 char *cp
; /* char ptr */
330 /* use cached copies if avail */
332 if ((io_op
== ETM_IO_OP_WR
) && (fn_wr
[0] != '\0')) {
335 if (((io_op
== ETM_IO_OP_RD
) || (io_op
== ETM_IO_OP_PK
)) &&
336 (fn_rd
[0] != '\0')) {
340 /* create cached copies if empty "" property string */
342 prop_str
= fmd_prop_get_string(hdl
, ETM_PROP_NM_XPORT_ADDRS
);
343 if (etm_xport_debug_lvl
>= 2) {
344 fmd_hdl_debug(hdl
, "info: etm_xport_get_fn prop_str %s\n",
348 if (strlen(prop_str
) == 0) {
352 if (stat(ETM_XPORT_DEV_VLDC
, &buf
) == 0) {
354 fname
= ETM_XPORT_DEV_VLDC
;
357 fname
= ETM_XPORT_DEV_FN_SP
;
360 (void) strncpy(fn_wr
, fname
, PATH_MAX
- 1);
361 (void) strncpy(fn_rd
, fname
, PATH_MAX
- 1);
363 if (io_op
== ETM_IO_OP_WR
) {
367 } /* if no/empty property set */
369 /* create cached copies if "write[|read]" property string */
371 if (io_op
== ETM_IO_OP_WR
) {
372 (void) strncpy(fn_wr
, prop_str
, PATH_MAX
- 1);
373 if ((cp
= strchr(fn_wr
, '|')) != NULL
) {
378 if ((cp
= strchr(prop_str
, '|')) != NULL
) {
383 (void) strncpy(fn_rd
, cp
, PATH_MAX
- 1);
385 } /* whether io op is write/read/peek */
389 if (etm_xport_debug_lvl
>= 2) {
390 fmd_hdl_debug(hdl
, "info: etm_xport_get_fn fn_wr %s fn_rd %s\n",
393 fmd_prop_free_string(hdl
, prop_str
);
396 } /* etm_xport_get_fn() */
399 * etm_xport_valid_addr - validate the given transport address,
401 * or -errno value if not
405 etm_xport_valid_addr(etm_xport_addr_t addr
)
407 _etm_xport_addr_t
*_addr
; /* transport address */
408 struct stat stat_buf
; /* buffer for stat() results */
416 if (_addr
->magic_num
!= ETM_XPORT_DD_MAGIC_ADDR
) {
417 etm_xport_stats
.xport_addr_magicnum_bad
.fmds_value
.ui64
++;
421 if (stat(_addr
->fn
, &stat_buf
) < 0) {
422 /* errno assumed set by above call */
423 etm_xport_stats
.xport_addr_fn_bad
.fmds_value
.ui64
++;
429 } /* etm_xport_valid_addr() */
432 * etm_xport_valid_conn - validate the given connection handle,
434 * or -errno value if not
438 etm_xport_valid_conn(etm_xport_conn_t conn
)
440 _etm_xport_conn_t
*_conn
; /* connection handle */
448 if (_conn
->magic_num
!= ETM_XPORT_DD_MAGIC_CONN
) {
449 etm_xport_stats
.xport_conn_magicnum_bad
.fmds_value
.ui64
++;
453 if (_conn
->fd
<= -1) {
454 etm_xport_stats
.xport_conn_fd_bad
.fmds_value
.ui64
++;
460 } /* etm_xport_valid_conn() */
463 * etm_xport_free_addr - free the given transport address
467 etm_xport_free_addr(fmd_hdl_t
*hdl
, etm_xport_addr_t addr
)
470 etm_xport_stats
.xport_free_addr_badargs
.fmds_value
.ui64
++;
474 fmd_hdl_free(hdl
, addr
, sizeof (_etm_xport_addr_t
));
476 } /* etm_xport_free_addr() */
479 * etm_xport_dup_addr - duplicate the given transport address,
480 * which is to be freed separately,
481 * return the newly allocated transport address
482 * pending until possible to do so
485 static etm_xport_addr_t
486 etm_xport_dup_addr(fmd_hdl_t
*hdl
, etm_xport_addr_t addr
)
488 etm_xport_addr_t new_addr
; /* new transport address */
490 new_addr
= fmd_hdl_zalloc(hdl
, sizeof (_etm_xport_addr_t
), FMD_SLEEP
);
491 (void) memcpy(new_addr
, addr
, sizeof (_etm_xport_addr_t
));
494 } /* etm_xport_dup_addr() */
497 * etm_xport_raw_peek - try to peek N <= MTU bytes from the connection
498 * into the caller's given buffer,
499 * return how many bytes actually peeked
502 * peeked data is NOT guaranteed by all platform transports
503 * to remain enqueued if this process/thread crashes;
504 * this casts some doubt on the utility of this func
506 * transport does NOT support peek sizes > MTU
510 etm_xport_raw_peek(fmd_hdl_t
*hdl
, _etm_xport_conn_t
*_conn
,
511 void *buf
, size_t byte_cnt
)
513 ssize_t rv
; /* ret val */
514 ssize_t n
; /* gen use */
515 etm_xport_msg_peek_t peek_ctl
; /* struct for peeking */
519 /* sanity check args */
521 if ((hdl
== NULL
) || (_conn
== NULL
) || (buf
== NULL
)) {
522 etm_xport_stats
.xport_rawpeek_badargs
.fmds_value
.ui64
++;
526 if ((etm_xport_irb_mtu_sz
> 0) && (byte_cnt
> etm_xport_irb_mtu_sz
)) {
527 etm_xport_stats
.xport_rawpeek_badargs
.fmds_value
.ui64
++;
531 /* try to peek requested amt of data */
533 peek_ctl
.pk_buf
= buf
;
534 peek_ctl
.pk_buflen
= byte_cnt
;
535 peek_ctl
.pk_flags
= 0;
536 peek_ctl
.pk_rsvd
= 0;
538 if (etm_xport_should_fake_dd
) {
539 n
= etm_fake_ioctl(_conn
->fd
, ETM_XPORT_IOCTL_DATA_PEEK
,
542 n
= ioctl(_conn
->fd
, ETM_XPORT_IOCTL_DATA_PEEK
, &peek_ctl
);
545 /* errno assumed set by above call */
546 etm_xport_stats
.xport_os_peek_fail
.fmds_value
.ui64
++;
549 rv
= peek_ctl
.pk_buflen
;
552 if (etm_xport_debug_lvl
>= 3) {
553 fmd_hdl_debug(hdl
, "info: [fake] ioctl(_PEEK) ~= %d bytes\n",
558 } /* etm_xport_raw_peek() */
/*
 * Design_Note:
 *
 * The transport device driver did not implement byte stream semantics
 * per the spec; its behavior is closer to that of a block device.
 * Consequently, ETM within its Transport API attempts to make the device
 * look like a byte stream by using an intermediate buffer in user space
 * and maintaining progress pointers within that buffer which is populated
 * in near-MTU sized reads. We think it's OK to leave the write side
 * implementation as it was originally written for byte stream semantics
 * because we were told subsequent write()s will pend until the earlier
 * content is read() at the remote end -- essentially each write() must be
 * paired with a single read() -- the device driver does not buffer any I/O.
 *
 * The early driver bugs of returning more data than requested (thus
 * causing buffer overrun corruptions/crashes) and requiring user buffers
 * to be stack based vs heap based, have both been corrected.
 */
580 * etm_xport_buffered_read - try to read N <= MTU bytes from the connection
581 * or from an privately maintained intermediate buffer,
582 * into the caller's given buffer,
583 * return how many bytes actually read
587 * simple buffer scheme consumes 2x MTU bytes of memory and
588 * may do unnecesssary memory copies for ease of coding
592 etm_xport_buffered_read(fmd_hdl_t
*hdl
, _etm_xport_conn_t
*_conn
,
593 void *buf
, size_t byte_cnt
)
595 ssize_t i
, n
; /* gen use */
597 /* perform one-time initializations */
602 * These initializations are not done in etm_xport_init() because
603 * the connection/device is not yet open and hence the MTU size
604 * is not yet known. However, the corresponding cleanup is done
605 * in etm_xport_fini(). The buffering for byte stream semantics
606 * should be done on a per device vs per connection basis; the
607 * MTU size is assumed to remain constant across all connections.
610 if (etm_xport_irb_mtu_sz
== 0) {
611 if ((n
= etm_xport_get_opt(hdl
, _conn
,
612 ETM_XPORT_OPT_MTU_SZ
)) < 0) {
613 etm_xport_irb_mtu_sz
= ETM_XPORT_MTU_SZ_DEF
;
615 etm_xport_irb_mtu_sz
= n
;
618 if (etm_xport_irb_area
== NULL
) {
619 etm_xport_irb_area
= fmd_hdl_zalloc(hdl
,
620 2 * etm_xport_irb_mtu_sz
, FMD_SLEEP
);
621 etm_xport_irb_head
= etm_xport_irb_area
;
622 etm_xport_irb_tail
= etm_xport_irb_head
;
625 /* sanity check the byte count after have MTU */
627 if (byte_cnt
> etm_xport_irb_mtu_sz
) {
628 etm_xport_stats
.xport_buffread_badargs
.fmds_value
.ui64
++;
632 /* if intermediate buffer can satisfy request do so w/out xport read */
634 if (byte_cnt
<= (etm_xport_irb_tail
- etm_xport_irb_head
)) {
635 (void) memcpy(buf
, etm_xport_irb_head
, byte_cnt
);
636 etm_xport_irb_head
+= byte_cnt
;
637 if (etm_xport_debug_lvl
>= 2) {
638 fmd_hdl_debug(hdl
, "info: quik buffered read == %d\n",
644 /* slide buffer contents to front to make room for [MTU] more bytes */
646 n
= etm_xport_irb_tail
- etm_xport_irb_head
;
647 (void) memmove(etm_xport_irb_area
, etm_xport_irb_head
, n
);
648 etm_xport_irb_head
= etm_xport_irb_area
;
649 etm_xport_irb_tail
= etm_xport_irb_head
+ n
;
652 * peek to see how much data is avail and read all of it;
653 * there is no race condition between peeking and reading
654 * due to unbuffered design of the device driver
659 pollfd
.events
= POLLIN
;
661 pollfd
.fd
= _conn
->fd
;
663 if ((n
= poll(&pollfd
, 1, -1)) < 1) {
671 * set i to the maximum size --- read(..., i) below will
672 * pull in n bytes (n <= i) anyway
674 i
= etm_xport_irb_mtu_sz
;
676 if ((i
= etm_xport_raw_peek(hdl
, _conn
, etm_xport_irb_tail
,
677 etm_xport_irb_mtu_sz
)) < 0) {
681 if ((n
= read(_conn
->fd
, etm_xport_irb_tail
, i
)) < 0) {
682 /* errno assumed set by above call */
683 etm_xport_stats
.xport_os_read_fail
.fmds_value
.ui64
++;
686 etm_xport_irb_tail
+= n
;
688 /* satisfy request as best we can with what we now have */
690 n
= MIN(byte_cnt
, (etm_xport_irb_tail
- etm_xport_irb_head
));
691 (void) memcpy(buf
, etm_xport_irb_head
, n
);
692 etm_xport_irb_head
+= n
;
693 if (etm_xport_debug_lvl
>= 2) {
694 fmd_hdl_debug(hdl
, "info: slow buffered read == %d\n", n
);
698 } /* etm_xport_buffered_read() */
/*
 * ------------------ connection establishment functions ---------------------
 */
705 * etm_xport_init - initialize/setup any transport infrastructure
706 * before any connections are opened,
707 * return 0 or -errno value if initialization failed
711 etm_xport_init(fmd_hdl_t
*hdl
)
713 _etm_xport_addr_t
**_addrv
; /* address vector */
714 int i
; /* vector index */
715 ssize_t n
; /* gen use */
716 int rv
; /* ret val */
717 struct stat stat_buf
; /* file stat struct */
718 char *fn
; /* filename of dev node */
720 rv
= 0; /* assume good */
729 fmd_hdl_debug(hdl
, "info: xport initializing\n");
731 /* setup statistics and properties from FMD */
733 (void) fmd_stat_create(hdl
, FMD_STAT_NOALLOC
,
734 sizeof (etm_xport_stats
) / sizeof (fmd_stat_t
),
735 (fmd_stat_t
*)&etm_xport_stats
);
737 etm_xport_debug_lvl
= fmd_prop_get_int32(hdl
, ETM_PROP_NM_DEBUG_LVL
);
738 etm_xport_addrs
= fmd_prop_get_string(hdl
, ETM_PROP_NM_XPORT_ADDRS
);
739 fmd_hdl_debug(hdl
, "info: etm_xport_debug_lvl %d\n",
740 etm_xport_debug_lvl
);
741 fmd_hdl_debug(hdl
, "info: etm_xport_addrs %s\n", etm_xport_addrs
);
743 /* decide whether to fake [some of] the device driver behavior */
745 etm_xport_should_fake_dd
= 0; /* default to false */
747 fn
= etm_xport_get_fn(hdl
, ETM_IO_OP_RD
);
748 if (stat(fn
, &stat_buf
) < 0) {
749 /* errno assumed set by above call */
750 fmd_hdl_error(hdl
, "error: bad device node %s errno %d\n",
755 if (!S_ISCHR(stat_buf
.st_mode
) && use_vldc
== 0) {
756 etm_xport_should_fake_dd
= 1; /* not a char driver */
758 fmd_hdl_debug(hdl
, "info: etm_xport_should_fake_dd %d\n",
759 etm_xport_should_fake_dd
);
761 /* validate each default dst transport address */
763 if ((_addrv
= (void *)etm_xport_get_ev_addrv(hdl
, NULL
)) == NULL
) {
764 /* errno assumed set by above call */
769 for (i
= 0; _addrv
[i
] != NULL
; i
++) {
770 if ((n
= etm_xport_valid_addr(_addrv
[i
])) < 0) {
771 fmd_hdl_error(hdl
, "error: bad xport addr %p\n",
776 } /* foreach dst addr */
779 etm_xport_vldc_conn
= etm_xport_open(hdl
, _addrv
[0]);
780 if (etm_xport_vldc_conn
== NULL
) {
781 fmd_hdl_debug(hdl
, "info: etm_xport_open() failed\n");
787 if (_addrv
!= NULL
) {
788 etm_xport_free_addrv(hdl
, (void *)_addrv
);
791 fmd_hdl_debug(hdl
, "info: xport initialized ok\n");
795 } /* etm_xport_init() */
798 * etm_xport_open - open a connection with the given endpoint,
799 * return the connection handle,
800 * or NULL and set errno if open failed
802 * Design_Note: The current transport device driver's open()
803 * call will succeed even if the SP is down;
804 * hence there's currently no need for a retry
809 etm_xport_open(fmd_hdl_t
*hdl
, etm_xport_addr_t addr
)
811 _etm_xport_addr_t
*_addr
; /* address handle */
812 _etm_xport_conn_t
*_conn
; /* connection handle */
813 ssize_t n
; /* gen use */
815 if ((n
= etm_xport_valid_addr(addr
)) < 0) {
820 _addr
= etm_xport_dup_addr(hdl
, addr
);
822 /* allocate a connection handle and start populating it */
824 _conn
= fmd_hdl_zalloc(hdl
, sizeof (_etm_xport_conn_t
), FMD_SLEEP
);
826 (void) pthread_mutex_lock(&etm_xport_vldc_lock
);
828 if (use_vldc
== 0 || etm_xport_vldc_conn
== NULL
) {
829 if ((_conn
->fd
= open(_addr
->fn
,
830 ETM_XPORT_OPEN_FLAGS
, 0)) == -1) {
831 /* errno assumed set by above call */
832 etm_xport_free_addr(hdl
, _addr
);
833 fmd_hdl_free(hdl
, _conn
, sizeof (_etm_xport_conn_t
));
834 etm_xport_stats
.xport_os_open_fail
.fmds_value
.ui64
++;
835 (void) pthread_mutex_unlock(&etm_xport_vldc_lock
);
840 if (use_vldc
&& etm_xport_vldc_conn
== NULL
) {
843 /* Set the channel to reliable mode */
844 op
.op_sel
= VLDC_OP_SET
;
845 op
.opt_sel
= VLDC_OPT_MODE
;
846 op
.opt_val
= LDC_MODE_RELIABLE
;
848 if (ioctl(_conn
->fd
, VLDC_IOCTL_OPT_OP
, &op
) != 0) {
849 /* errno assumed set by above call */
850 (void) close(_conn
->fd
);
851 etm_xport_free_addr(hdl
, _addr
);
852 fmd_hdl_free(hdl
, _conn
, sizeof (_etm_xport_conn_t
));
853 etm_xport_stats
.xport_os_ioctl_fail
.fmds_value
.ui64
++;
854 (void) pthread_mutex_unlock(&etm_xport_vldc_lock
);
858 etm_xport_vldc_conn
= _conn
;
859 } else if (use_vldc
&& etm_xport_vldc_conn
!= NULL
) {
860 _conn
->fd
= dup(etm_xport_vldc_conn
->fd
);
863 (void) pthread_mutex_unlock(&etm_xport_vldc_lock
);
865 /* return the fully formed connection handle */
867 _conn
->magic_num
= ETM_XPORT_DD_MAGIC_CONN
;
872 } /* etm_xport_open() */
875 * etm_xport_accept - accept a request to open a connection,
876 * pending until a remote endpoint opens a
877 * a new connection to us [and sends an ETM msg],
878 * per non-NULL addrp optionally indicate the
879 * remote address if known/avail (NULL if not),
880 * return the connection handle,
881 * or NULL and set errno on failure
884 * any returned transport address is valid only for
885 * as long as the associated connection remains open;
886 * callers should not try to free the transport address
888 * if new connections are rapid relative to how
889 * frequently this function is called, fairness will
890 * be provided among which connections are accepted
892 * this function may maintain state to recognize [new]
893 * connections and/or to provide fairness
897 etm_xport_accept(fmd_hdl_t
*hdl
, etm_xport_addr_t
*addrp
)
899 _etm_xport_addr_t
*_addr
; /* address handle */
900 _etm_xport_addr_t
**_addrv
; /* vector of addresses */
901 _etm_xport_conn_t
*_conn
; /* connection handle */
902 _etm_xport_conn_t
*rv
; /* ret val */
903 uint8_t buf
[4]; /* buffer for peeking */
904 int n
; /* byte cnt */
905 struct timespec tms
; /* for nanosleep() */
907 rv
= NULL
; /* default is failure */
912 tms
.tv_sec
= ETM_SLEEP_QUIK
;
916 * get the default dst transport address and open a connection to it;
917 * there is only 1 default addr
920 if ((_addrv
= (void*)etm_xport_get_ev_addrv(hdl
, NULL
)) == NULL
) {
921 /* errno assumed set by above call */
925 if (_addrv
[0] == NULL
) {
926 errno
= ENXIO
; /* missing addr */
927 etm_xport_stats
.xport_accept_badargs
.fmds_value
.ui64
++;
931 if (_addrv
[1] != NULL
) {
932 errno
= E2BIG
; /* too many addrs */
933 etm_xport_stats
.xport_accept_badargs
.fmds_value
.ui64
++;
938 _addr
->fn
= etm_xport_get_fn(hdl
, ETM_IO_OP_RD
);
940 if ((_conn
= etm_xport_open(hdl
, _addr
)) == NULL
) {
941 /* errno assumed set by above call */
945 if (etm_xport_should_fake_dd
) {
946 (void) nanosleep(&tms
, NULL
); /* delay [for resp capture] */
947 (void) ftruncate(_conn
->fd
, 0); /* act like socket/queue/pipe */
951 * peek from the connection to simulate an accept() system call
952 * behavior; this will pend until some ETM message is written
959 pollfd
.events
= POLLIN
;
961 pollfd
.fd
= _conn
->fd
;
963 if ((n
= poll(&pollfd
, 1, -1)) < 1) {
970 if ((n
= etm_xport_raw_peek(hdl
, _conn
, buf
, 1)) < 0) {
976 rv
= _conn
; /* success, return the open connection */
980 /* cleanup the connection if failed */
984 (void) etm_xport_close(hdl
, _conn
);
988 *addrp
= _conn
->addr
;
992 /* free _addrv and all its transport addresses */
994 if (_addrv
!= NULL
) {
995 etm_xport_free_addrv(hdl
, (void *)_addrv
);
998 if (etm_xport_debug_lvl
>= 2) {
999 fmd_hdl_debug(hdl
, "info: accept conn %p w/ *addrp %p\n",
1000 rv
, (addrp
!= NULL
? *addrp
: NULL
));
1005 } /* etm_xport_accept() */
1008 * etm_xport_close - close a connection from either endpoint,
1009 * return the original connection handle,
1010 * or NULL and set errno if close failed
1014 etm_xport_close(fmd_hdl_t
*hdl
, etm_xport_conn_t conn
)
1016 etm_xport_conn_t rv
; /* ret val */
1017 _etm_xport_conn_t
*_conn
; /* connection handle */
1018 int nev
; /* -errno val */
1022 rv
= _conn
; /* assume success */
1024 if ((nev
= etm_xport_valid_conn(_conn
)) < 0) {
1030 /* close the device node */
1032 (void) pthread_mutex_lock(&etm_xport_vldc_lock
);
1034 if (close(_conn
->fd
) < 0) {
1035 /* errno assumed set by above call */
1036 etm_xport_stats
.xport_os_close_fail
.fmds_value
.ui64
++;
1041 if (use_vldc
&& (_conn
== etm_xport_vldc_conn
)) {
1042 etm_xport_vldc_conn
= NULL
;
1045 (void) pthread_mutex_unlock(&etm_xport_vldc_lock
);
1049 /* cleanup the connection */
1051 if (_conn
!= NULL
) {
1052 etm_xport_free_addr(hdl
, _conn
->addr
);
1054 _conn
->magic_num
= 0;
1056 fmd_hdl_free(hdl
, _conn
, sizeof (_etm_xport_conn_t
));
1064 } /* etm_xport_close() */
1067 * etm_xport_get_ev_addrv - indicate which transport addresses
1068 * are implied as destinations by the
1069 * given FMA event, if given no FMA event
1070 * (NULL) indicate default or policy
1071 * driven dst transport addresses,
1072 * return an allocated NULL terminated
1073 * vector of allocated transport addresses,
1074 * or NULL and set errno if none
1076 * callers should never try to individually free an addr
1077 * within the returned vector
1081 etm_xport_get_ev_addrv(fmd_hdl_t
*hdl
, nvlist_t
*evp
)
1083 _etm_xport_addr_t
*_addr
; /* address handle */
1084 _etm_xport_addr_t
**_addrv
; /* vector of addresses */
1089 * allocate address handles for default/policy destinations
1091 * in reality we have just 1 dst transport addr
1094 _addr
= fmd_hdl_zalloc(hdl
, sizeof (_etm_xport_addr_t
),
1099 * allocate address handles per FMA event content
1101 * in reality we have just 1 dst transport addr
1104 _addr
= fmd_hdl_zalloc(hdl
, sizeof (_etm_xport_addr_t
),
1106 } /* whether caller passed in a FMA event */
1108 /* allocate vector with 1 non-NULL transport addr */
1110 _addrv
= fmd_hdl_zalloc(hdl
, 2 * sizeof (_etm_xport_addr_t
*),
1113 _addr
->fn
= etm_xport_get_fn(hdl
, ETM_IO_OP_WR
);
1114 _addr
->magic_num
= ETM_XPORT_DD_MAGIC_ADDR
;
1118 return ((void *) _addrv
);
1120 } /* etm_xport_get_ev_addrv() */
1123 * etm_xport_free_addrv - free the given vector of transport addresses,
1124 * including each transport address
1128 etm_xport_free_addrv(fmd_hdl_t
*hdl
, etm_xport_addr_t
*addrv
)
1130 _etm_xport_addr_t
**_addrv
; /* vector of addrs */
1131 int i
; /* vector index */
1133 if (addrv
== NULL
) {
1134 etm_xport_stats
.xport_free_addrv_badargs
.fmds_value
.ui64
++;
1138 _addrv
= (void*)addrv
;
1140 for (i
= 0; _addrv
[i
] != NULL
; i
++) {
1141 etm_xport_free_addr(hdl
, _addrv
[i
]);
1144 fmd_hdl_free(hdl
, _addrv
, (i
+ 1) * sizeof (_etm_xport_addr_t
*));
1146 } /* etm_xport_free_addrv() */
1149 * etm_xport_get_addr_conn - indicate which connections in a NULL
1150 * terminated vector of connection
1151 * handles are associated with the
1152 * given transport address,
1153 * return an allocated NULL terminated
1154 * vector of those connection handles,
1155 * or NULL and set errno if none
1159 etm_xport_get_addr_conn(fmd_hdl_t
*hdl
, etm_xport_conn_t
*connv
,
1160 etm_xport_addr_t addr
)
1162 _etm_xport_conn_t
**_connv
; /* vector of connections */
1163 _etm_xport_conn_t
**_mcv
; /* matching connections vector */
1164 _etm_xport_addr_t
*_addr
; /* transport addr to match */
1165 int n
; /* matching transport addr cnt */
1166 int i
; /* vector index */
1168 if ((connv
== NULL
) || (addr
== NULL
)) {
1170 etm_xport_stats
.xport_get_addr_conn_badargs
.fmds_value
.ui64
++;
1174 _connv
= (void*)connv
;
1175 _addr
= (void*)addr
;
1177 /* count, allocate space for, and copy, all matching addrs */
1180 for (i
= 0; _connv
[i
] != NULL
; i
++) {
1181 if ((_connv
[i
]->addr
== _addr
) ||
1182 ((_connv
[i
]->addr
!= NULL
) &&
1183 (_connv
[i
]->addr
->fn
== _addr
->fn
))) {
1186 } /* for counting how many addresses match */
1188 _mcv
= fmd_hdl_zalloc(hdl
, (n
+ 1) * sizeof (_etm_xport_conn_t
*),
1191 for (i
= 0; _connv
[i
] != NULL
; i
++) {
1192 if ((_connv
[i
]->addr
== _addr
) ||
1193 ((_connv
[i
]->addr
!= NULL
) &&
1194 (_connv
[i
]->addr
->fn
== _addr
->fn
))) {
1195 _mcv
[n
] = _connv
[i
];
1198 } /* for copying matching address pointers */
1201 return ((void *) _mcv
);
1203 } /* etm_xport_get_addr_conn() */
1206 * etm_xport_get_any_lcc - indicate which endpoint has undergone
1207 * a life cycle change and what that change
1208 * was (ex: came up), pending until a change
1209 * has occured for some/any endpoint,
1210 * return the appropriate address handle,
1211 * or NULL and set errno if problem
1214 * this function maintains or accesses state/history
1215 * regarding life cycle changes of endpoints
1217 * if life cycle changes are rapid relative to how
1218 * frequently this function is called, fairness will
1219 * be provided among which endpoints are reported
1223 etm_xport_get_any_lcc(fmd_hdl_t
*hdl
, etm_xport_lcc_t
*lccp
)
1225 if ((hdl
== NULL
) || (lccp
== NULL
)) {
1226 etm_xport_stats
.xport_get_any_lcc_badargs
.fmds_value
.ui64
++;
1232 * function not needed in FMA Phase 1 for sun4v/Ontario
1238 } /* etm_xport_get_any_lcc() */
1241 * etm_xport_fini - finish/teardown any transport infrastructure
1242 * after all connections are closed,
1243 * return 0 or -errno value if teardown failed
1247 etm_xport_fini(fmd_hdl_t
*hdl
)
1249 fmd_hdl_debug(hdl
, "info: xport finalizing\n");
1251 if (use_vldc
&& (etm_xport_vldc_conn
!= NULL
)) {
1252 (void) etm_xport_close(hdl
, etm_xport_vldc_conn
);
1253 etm_xport_vldc_conn
= NULL
;
1256 /* free any long standing properties from FMD */
1258 fmd_prop_free_string(hdl
, etm_xport_addrs
);
1260 /* cleanup the intermediate read buffer */
1262 if (etm_xport_irb_tail
!= etm_xport_irb_head
) {
1263 fmd_hdl_debug(hdl
, "warning: xport %d bytes stale data\n",
1264 (int)(etm_xport_irb_tail
- etm_xport_irb_head
));
1266 fmd_hdl_free(hdl
, etm_xport_irb_area
, 2 * etm_xport_irb_mtu_sz
);
1267 etm_xport_irb_area
= NULL
;
1268 etm_xport_irb_head
= NULL
;
1269 etm_xport_irb_tail
= NULL
;
1270 etm_xport_irb_mtu_sz
= 0;
1272 /* cleanup statistics from FMD */
1274 (void) fmd_stat_destroy(hdl
,
1275 sizeof (etm_xport_stats
) / sizeof (fmd_stat_t
),
1276 (fmd_stat_t
*)&etm_xport_stats
);
1278 fmd_hdl_debug(hdl
, "info: xport finalized ok\n");
1281 } /* etm_xport_fini() */
/*
 * ------------------------ input/output functions ---------------------------
 */
1288 * etm_xport_read - try to read N bytes from the connection
1289 * into the given buffer,
1290 * return how many bytes actually read
1295 etm_xport_read(fmd_hdl_t
*hdl
, etm_xport_conn_t conn
, void *buf
,
1298 return (etm_xport_buffered_read(hdl
, conn
, buf
, byte_cnt
));
1300 } /* etm_xport_read() */
1303 * etm_xport_write - try to write N bytes to the connection
1304 * from the given buffer,
1305 * return how many bytes actually written
1310 etm_xport_write(fmd_hdl_t
*hdl
, etm_xport_conn_t conn
, void *buf
,
1313 _etm_xport_conn_t
*_conn
; /* connection handle */
1314 int n
; /* byte cnt */
1318 if (hdl
== NULL
) { /* appease lint */
1321 if ((n
= etm_xport_valid_conn(_conn
)) < 0) {
1325 /* write to the connection device's open file descriptor */
1327 if ((n
= write(_conn
->fd
, buf
, byte_cnt
)) < 0) {
1328 /* errno assumed set by above call */
1329 etm_xport_stats
.xport_os_write_fail
.fmds_value
.ui64
++;
1335 } /* etm_xport_write() */
1338 * ------------------------ miscellaneous functions --------------------------
1342 * etm_xport_get_opt - get a connection's transport option value,
1343 * return the current value
1344 * or -errno value (ex: -ENOTSUP)
1348 etm_xport_get_opt(fmd_hdl_t
*hdl
, etm_xport_conn_t conn
, etm_xport_opt_t opt
)
1350 ssize_t rv
; /* ret val */
1351 _etm_xport_conn_t
*_conn
; /* connection handle */
1352 etm_xport_opt_op_t op_ctl
; /* struct for option ops */
1353 ssize_t n
; /* gen use */
1358 if (hdl
== NULL
) { /* appease lint */
1361 if ((n
= etm_xport_valid_conn(_conn
)) < 0) {
1365 op_ctl
.oo_op
= ETM_XPORT_OPT_GET
;
1366 op_ctl
.oo_opt
= opt
;
1368 if (etm_xport_should_fake_dd
) {
1369 n
= etm_fake_ioctl(_conn
->fd
, ETM_XPORT_IOCTL_OPT_OP
, &op_ctl
);
1370 } else if (use_vldc
) {
1371 if (opt
== ETM_XPORT_OPT_MTU_SZ
) {
1372 vldc_opt_op_t operation
;
1374 operation
.op_sel
= VLDC_OP_GET
;
1375 operation
.opt_sel
= VLDC_OPT_MTU_SZ
;
1377 n
= ioctl(_conn
->fd
, VLDC_IOCTL_OPT_OP
, &operation
);
1379 op_ctl
.oo_val
= operation
.opt_val
;
1384 n
= ioctl(_conn
->fd
, ETM_XPORT_IOCTL_OPT_OP
, &op_ctl
);
1387 /* errno assumed set by above call */
1389 etm_xport_stats
.xport_os_ioctl_fail
.fmds_value
.ui64
++;
1391 rv
= (int)op_ctl
.oo_val
;
1396 } /* etm_xport_get_opt() */