usr/src/uts/sun4u/starfire/sys/idn.h
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
26 * Inter-Domain Network
29 #ifndef _SYS_IDN_H
30 #define _SYS_IDN_H
32 #ifndef _ASM
34 #ifdef _KERNEL
36 #include <sys/note.h>
38 #include <sys/cmn_err.h>
39 #include <sys/dditypes.h>
40 #include <sys/stream.h>
41 #include <sys/machsystm.h>
42 #include <sys/ethernet.h>
43 #include <sys/dlpi.h>
44 #include <sys/time.h>
45 #include <sys/kmem.h>
46 #include <sys/atomic.h>
47 #include <sys/cpuvar.h>
49 #include <sys/idn_sigb.h>
50 #include <sys/idn_smr.h>
51 #endif /* _KERNEL */
53 #ifdef __cplusplus
54 extern "C" {
55 #endif
57 typedef const char * const procname_t;
59 #define MB2B(m) ((size_t)(m) << 20) /* MBytes to Bytes */
60 #define B2MB(b) ((uint_t)((b) >> 20)) /* Bytes to MBytes */
62 #ifdef _KERNEL
65 * IDN_PROP_SMRSIZE - User specified size in MBytes.
66 * IDN_PROP_SMRADDR - OBP's internal physical address of the region.
68 * OBP properties of "memory" node that define the SMR space.
70 #define IDN_PROP_SMRSIZE "idn-smr-size"
71 #define IDN_PROP_SMRADDR "idn-smr-addr"
74 * IDN_AWOLMSG_INTERVAL (driver.conf)
76 * Number of seconds between AWOL messages on a per-domain basis.
77 * The purpose is to throttle the frequency at which these
78 * messages appear.
80 * IDN_MSGWAIT_NEGO (driver.conf)
81 * IDN_MSGWAIT_CFG
82 * IDN_MSGWAIT_CON
83 * IDN_MSGWAIT_FIN
84 * IDN_MSGWAIT_CMD
85 * IDN_MSGWAIT_DATA
87 * Number of seconds to wait for response to respective
88 * message type.
90 * IDN_RETRYFREQ_NEGO (driver.conf)
91 * IDN_RETRYFREQ_CON
92 * IDN_RETRYFREQ_FIN
94 * Number of seconds to wait between retries of the respective
95 * message type.
97 * IDN_SMR_ALIGN (not tunable)
99 * The hardware registers that describe the SMR are based on a 64K
100 * aligned physical address.
102 * IDN_SMR_SIZE (OBP [only])
104 * Total size of the SMR (Shared Memory Region) in bytes.
106 * IDN_NWR_SIZE (driver.conf)
108 * Total size of the NWR (NetWork Region) portion of the SMR which
109 * is actually used to support network activity. The NWR is managed
110 * as simply a pool of I/O buffers which are distributed by the
111 * Master domain to the Slaves for the purpose of communicating
112 * between each other. If not set then the entire SMR is used
113 * as the NWR.
114 * Req: IDN_NWR_SIZE <= IDN_SMR_SIZE
116 * IDN_SMR_BUFSIZE (driver.conf)
118 * Size of individual SMR buffers. The SMR is divided into chunks
119 * of IDN_SMR_BUFSIZE bytes. The IDN_MTU is based on this size
120 * and thus the IDN_SMR_BUFSIZE should be chosen based on performance.
122 * IDN_DATA_SIZE (NOT tunable)
124 * Portion of IDN_SMR_BUFSIZE that can contain raw non-IDN dependent
125 * data. We subtract IDN_ALIGNSIZE bytes to allow for fast bcopy
126 * alignment.
127 * Req: IDN_DATA_SIZE <=
128 * (IDN_SMR_BUFSIZE - sizeof (smr_pkthdr_t) - IDN_ALIGNSIZE)
130 * IDN_MTU (indirectly tunable via IDN_SMR_BUFSIZE)
132 * This size represents the portion of an SMR I/O buffer that can
133 * contain (ethernet headerless) data.
134 * Req: IDN_MTU <= IDN_DATA_SIZE - sizeof (ether_header)
136 * IDN_WINDOW_MAX (driver.conf)
138 * Maximum number of outstanding packets that are allowed per
139 * domain. If this value is exceeded for a particular domain
140 * no further I/Os will be transmitted to that domain until it
141 * has acknowledged enough of the previous transmission to bring
142 * down its outstanding I/O count (idn_domain.dio) below this
143 * value. In addition, if this value is exceeded then a Timer
144 * is scheduled to check for any response from the remote domain.
146 * IDN_WINDOW_INCR (driver.conf)
148 * As more channels/nets are activated on a particular domain,
149 * the number of data packets that can be outstanding to that
150 * domain grows. Since this natural occurrence can cause the
151 * outstanding-I/O count to a given domain to increase, we run
152 * the risk of dropping into the IDN_WINDOW_MAX region even
153 * though the receiving domain may be perfectly capable of
154 * handling the load. In order to compensate for this increased
155 * activity and to not incur unjustified slips into the
156 * IDN_WINDOW_MAX region, the IDN_WINDOW_MAX value is adjusted
157 * by IDN_WINDOW_INCR for each channel/net that is activated
158 * for a given domain.
160 * IDN_WINDOW_EMAX (not tunable)
162 * The effective value of IDN_WINDOW_MAX once it has
163 * been adjusted by IDN_WINDOW_INCR.
165 * IDN_RECLAIM_MIN (driver.conf)
167 * Minimum number of outstanding packets that are allowed
168 * before subsequent sends will attempt to reclaim some number
169 * of outstanding data packets.
171 * IDN_RECLAIM_MAX (driver.conf)
172 * This value represents the maximum number of outstanding
173 * packets we will try to reclaim during a send once we've
174 * passed the IDN_RECLAIM_MIN boundary.
176 * IDN_MODUNLOADABLE (ndd)
178 * By default the IDN driver is not unloadable. Setting this
179 * variable will allow the IDN driver to be unloaded provided
180 * it's not in use.
182 * IDN_LOWAT/IDN_HIWAT (driver.conf)
184 * Low/High water marks for the STREAM interface to IDN.
186 * IDN_MBOX_PER_NET (driver.conf)
188 * Number of mailbox entries that are allocated per channel/net.
189 * This value effectively represents the amount of outstanding
190 * activity that can reside at a domain. Increasing this value
191 * allows more packets to be in transit to a domain, however
192 * at some point there are diminishing returns since the receiver
193 * can only consume packets so fast.
195 * IDN_MAX_NETS (driver.conf)
197 * Maximum number of network interfaces (channels) that IDN
198 * is currently configured to allow. The absolute max is
199 * IDN_MAXMAX_NETS. We don't automatically default IDN_MAX_NETS
200 * to IDN_MAXMAX_NETS because it would mean wasted space in
201 * the mailbox region having to reserve mailboxes that will
202 * very likely go unused. The smaller this value the fewer
203 * the number of mailboxes in the SMR and thus the greater the
204 * number of possible I/O buffers available.
205 * Req: IDN_MAX_NETS <= IDN_MAXMAX_NETS
207 * IDN_CHECKSUM (driver.conf)
209 * If enabled, IDN validates the smr_pkthdr_t of incoming packets
210 * via a checksum, and calculates the checksum for outgoing packets.
211 * Only the first 3 fields of smr_pkthdr_t are checksummed and
212 * must be set to their expected values prior to calculating the
213 * checksum. Turned OFF by default when compiled DEBUG.
215 * IDN_SMR_MAXSIZE (not tunable)
217 * The absolute maximum size of the SMR region that we'll allow.
218 * Note that the virtual address space comes out of kernelmap.
220 #define IDN_AWOLMSG_INTERVAL 60 /* seconds */
221 #define IDN_MSGWAIT_NEGO 20 /* seconds */
222 #define IDN_MSGWAIT_CFG 40
223 #define IDN_MSGWAIT_CON 20
224 #define IDN_MSGWAIT_FIN 40
225 #define IDN_MSGWAIT_CMD 40
226 #define IDN_MSGWAIT_DATA 30
227 #define IDN_RETRYFREQ_NEGO 2
228 #define IDN_RETRYFREQ_CON 2
229 #define IDN_RETRYFREQ_FIN 3
231 #define IDN_SMR_BUFSIZE_MIN 512
232 #define IDN_SMR_BUFSIZE_MAX (512*1024)
233 #define IDN_SMR_BUFSIZE_DEF (16*1024)
235 #define IDN_SMR_SHIFT (16)
236 #define IDN_SMR_ALIGN (1 << IDN_SMR_SHIFT) /* 64K */
237 #define IDN_SMR_SIZE idn_smr_size
238 #define IDN_NWR_SIZE idn_nwr_size
239 #define IDN_SMR_BUFSIZE idn_smr_bufsize
240 #define IDN_DATA_SIZE (IDN_SMR_BUFSIZE \
241 - sizeof (smr_pkthdr_t) \
242 - IDN_ALIGNSIZE)
243 #define IDN_MTU (IDN_DATA_SIZE - sizeof (struct ether_header))
244 #define IDN_WINDOW_MAX idn_window_max
245 #define IDN_WINDOW_INCR idn_window_incr
246 #define IDN_WINDOW_EMAX idn_window_emax
247 #define IDN_RECLAIM_MIN idn_reclaim_min
248 #define IDN_RECLAIM_MAX idn_reclaim_max
249 #define IDN_MODUNLOADABLE idn_modunloadable
250 #define IDN_LOWAT idn_lowat
251 #define IDN_HIWAT idn_hiwat
252 #define IDN_MBOX_PER_NET idn_mbox_per_net
253 #define IDN_MAX_NETS idn_max_nets
254 #define IDN_CHECKSUM idn_checksum
255 #define IDN_SMR_MAXSIZE 96
256 #define _IDN_SMR_SIZE 32 /* 32M */
257 #define _IDN_NWR_SIZE _IDN_SMR_SIZE /* 32M */
258 #define _IDN_SMR_BUFSIZE (16 * 1024) /* 16K */
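/*
 * Worked example (not part of the original header), assuming the default
 * 16K buffer (IDN_SMR_BUFSIZE_DEF).  The exact byte values depend on
 * sizeof (smr_pkthdr_t) and IDN_ALIGNSIZE from the SMR headers, which are
 * not shown in this file:
 *
 *	IDN_DATA_SIZE = 16384 - sizeof (smr_pkthdr_t) - IDN_ALIGNSIZE
 *	IDN_MTU       = IDN_DATA_SIZE - sizeof (struct ether_header)
 *
 * so raising idn_smr_bufsize in driver.conf raises the MTU by the same
 * amount, while the packet-header and alignment overhead stay constant.
 */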
261 #define IDN_TUNEVAR_NAME(v) (*(char **)((ulong_t)&(v)+(sizeof (ulong_t))))
262 #define IDN_TUNEVAR_VALUE(v) (v)
265 * History structure to support problem analysis.
267 #define IDN_HISTORY_NUM 1024
268 #define IDN_HISTORY_LOG(op, d0, d1, d2) \
269 if (idn_history) { \
270 mutex_enter(&idnhlog.h_mutex); \
271 idnhlog.h_log[idnhlog.h_index].e_time = TIMESTAMP(); \
272 idnhlog.h_log[idnhlog.h_index].e_op = (ushort_t)(op); \
273 idnhlog.h_log[idnhlog.h_index].e_data[0] = (ushort_t)(d0); \
274 idnhlog.h_log[idnhlog.h_index].e_data[1] = (ushort_t)(d1); \
275 idnhlog.h_log[idnhlog.h_index].e_data[2] = (ushort_t)(d2); \
276 idnhlog.h_index++; \
277 idnhlog.h_index &= (IDN_HISTORY_NUM - 1); \
278 mutex_exit(&idnhlog.h_mutex); \
281 #define IDNH_GSTATE 0x0001 /* d0=gstate, d1=, d2= */
282 #define IDNH_DSTATE 0x0002 /* d0=domid, d1=dstate, d2=cpuid */
283 #define IDNH_AWOL 0x0003 /* d0=domid, d1=dstate, d2=cpuid */
284 #define IDNH_MASTERID 0x0004 /* d0=masterid, d1=oldid, d2= */
285 #define IDNH_NEGO 0x0005 /* d0=domid, d1=ds_trans_on, d2=ds_connected */
286 #define IDNH_FIN 0x0006 /* d0=domid, d1=finstate, d2= */
287 #define IDNH_RELINK 0x0007 /* d0=domid, d1=dstate, d2=ds_relink */
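/*
 * Illustrative sketch (not part of the original header): when idn_history
 * is set, an event is recorded by passing one of the IDNH_* codes plus up
 * to three 16-bit data words, e.g. for a per-domain state change (domid,
 * new_dstate and cpu are placeholders):
 *
 *	IDN_HISTORY_LOG(IDNH_DSTATE, domid, new_dstate, cpu);
 *
 * The wrap (h_index &= IDN_HISTORY_NUM - 1) relies on IDN_HISTORY_NUM
 * being a power of two, so h_log[] behaves as a circular buffer holding
 * the most recent 1024 events.
 */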
289 struct idn_h_entry {
290 hrtime_t e_time;
291 ushort_t e_op;
292 ushort_t e_data[3];
295 struct idn_history {
296 kmutex_t h_mutex;
297 int h_index;
298 struct idn_h_entry h_log[IDN_HISTORY_NUM];
300 #endif /* _KERNEL */
303 * IDN_SIGBPIL - Interrupt level at which IDN driver
304 * wakes up idn_sigbhandler_thread
306 #define IDN_SIGBPIL PIL_3
309 * Definition of sigbintr.sb_busy values which
310 * represents state of idn_sigbhandler.
312 #define IDNSIGB_NOTREADY ((uchar_t)0)
313 #define IDNSIGB_INACTIVE ((uchar_t)1)
314 #define IDNSIGB_STARTED ((uchar_t)2)
315 #define IDNSIGB_ACTIVE ((uchar_t)3)
316 #define IDNSIGB_DIE ((uchar_t)4)
319 * Some Xfire based macros that assume 4 cpus per board.
321 #define CPUID_TO_BOARDID(c) ((c) >> 2)
322 #define MAX_CPU_PER_BRD 4
323 #define CPUSET_TO_BOARDSET(cset, bset) \
325 register int c, b; \
326 (bset) = 0; \
327 for (b = 0; b < MAX_BOARDS; b++) \
328 for (c = 0; c < MAX_CPU_PER_BRD; c++) \
329 if (CPU_IN_SET((cset), \
330 (b * MAX_CPU_PER_BRD) + c)) \
331 (bset) |= 1 << b; \
335 * Macros to manipulate boardset and domainset masks.
337 typedef ushort_t boardset_t; /* assumes max of 16 boards */
338 typedef ushort_t domainset_t; /* assumes max of 16 domains */
340 #define BOARDSET(brd) ((boardset_t)(1 << (brd)))
341 #define BOARDSET_ALL ((boardset_t)~0)
342 #define BOARD_IN_SET(set, brd) ((set) & BOARDSET(brd))
343 #define BOARDSET_ADD(set, brd) ((set) |= BOARDSET(brd))
344 #define BOARDSET_DEL(set, brd) ((set) &= ~BOARDSET(brd))
345 #define DOMAINSET(d) ((domainset_t)1 << (d))
346 #define DOMAINSET_ALL ((domainset_t)~0)
347 #define DOMAIN_IN_SET(s, d) ((s) & DOMAINSET(d))
348 #define DOMAINSET_ADD(s, d) ((s) |= DOMAINSET(d))
349 #define DOMAINSET_DEL(s, d) ((s) &= ~DOMAINSET(d))
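/*
 * Illustrative sketch (not part of the original header) of the board/domain
 * set macros above; the function and variable names are hypothetical and
 * the block is never compiled.
 */
#if 0
static void
idn_example_sets(void)
{
	boardset_t	bset = 0;
	domainset_t	dset = 0;

	BOARDSET_ADD(bset, CPUID_TO_BOARDID(13));	/* cpu 13 -> board 3 */
	DOMAINSET_ADD(dset, 2);				/* domain 2 joins */
	DOMAINSET_ADD(dset, 5);				/* domain 5 joins */
	DOMAINSET_DEL(dset, 2);				/* domain 2 leaves */
	ASSERT(BOARD_IN_SET(bset, 3));
	ASSERT(DOMAIN_IN_SET(dset, 5) && !DOMAIN_IN_SET(dset, 2));
}
#endif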
352 * PFN_TO_SMADDR macro converts a PFN to an IDN_SMR_ALIGN'ed
353 * address suitable for the CIC bar/lar registers.
355 #if (IDN_SMR_SHIFT <= MMU_PAGESHIFT)
356 #define PFN_TO_SMADDR(pfn) ((pfn) << (MMU_PAGESHIFT - IDN_SMR_SHIFT))
357 #else
358 #define PFN_TO_SMADDR(pfn) ((pfn) >> (IDN_SMR_SHIFT - MMU_PAGESHIFT))
359 #endif
362 * Translate a physical address to a unique domain identifier.
363 * IMPORTANT - Assumes each board's memory is configured on an 8GB
364 * boundary. PA(8G) = PFN(1M).
366 #define MEM8G_SHIFT 33 /* (1 << 33) == 8G */
367 #define PADDR_TO_DOMAINID(paddr) ((int)((paddr) >> MEM8G_SHIFT) & 0xf)
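/*
 * Worked example (not part of the original header): with MEM8G_SHIFT == 33,
 * a physical address of 0x800000000 (32 GB, the start of the fifth 8 GB
 * board region) gives PADDR_TO_DOMAINID == 4.  Assuming the usual sun4u
 * 8K base page (MMU_PAGESHIFT == 13), PFN_TO_SMADDR reduces to (pfn) >> 3,
 * i.e. the pfn expressed in 64K SMR-alignment units for the CIC bar/lar
 * registers.
 */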
369 #define VALID_NWROFFSET(off, align) \
370 (((uint_t)(off) >= 0) && \
371 ((size_t)(off) < MB2B(IDN_NWR_SIZE)) && \
372 !((uint_t)(off) & ((align) - 1)))
373 #define VALID_NWRADDR(addr, align) \
374 (((caddr_t)(addr) >= idn.smr.vaddr) && \
375 VALID_NWROFFSET(((caddr_t)(addr) - idn.smr.vaddr), (align)))
376 #define VALID_DOMAINID(d) (((d) >= 0) && ((d) < MAX_DOMAINS))
377 #define VALID_UDOMAINID(d) ((d) < MAX_DOMAINS)
378 #define VALID_CPUID(c) (((c) >= 0) && ((c) < NCPU))
379 #define VALID_CHANNEL(c) (((c) >= 0) && ((c) < IDN_MAX_NETS))
380 #define VALID_UCHANNEL(c) ((c) < IDN_MAX_NETS)
383 * The following are bit values of idn_debug, currently
384 * only useful if compiled with DEBUG.
386 #ifdef DEBUG
387 #define STRING(sss) char sss[20]
388 #define INUM2STR(mm, ss) inum2str((mm), (ss))
390 #define IDNDBG_XDC 0x00000001
391 #define IDNDBG_XF 0x00000002
392 #define IDNDBG_REGS 0x00000004
393 #define IDNDBG_SMR 0x00000008
394 #define IDNDBG_PROTO 0x00000010
395 #define IDNDBG_STR 0x00000020
396 #define IDNDBG_DRV 0x00000040
397 #define IDNDBG_DATA 0x00000080
398 #define IDNDBG_STATE 0x00000100
399 #define IDNDBG_DLPI 0x00000200
400 #define IDNDBG_KERN 0x00000400
401 #define IDNDBG_ALLOC 0x00000800
402 #define IDNDBG_REMAP 0x00001000
403 #define IDNDBG_TIMER 0x00002000
404 #define IDNDBG_CHAN 0x00004000
405 #define IDNDBG_AWOL 0x00008000
406 #define IDNDBG_SYNC 0x00010000
407 #define _IDNDBG_UNUSED0 0x00020000
408 #define IDNDBG_HITLIST 0x00040000
409 #define IDNDBG_XMON 0x00080000
410 #define IDNDBG_TEST 0x80000000
411 #define IDNDBG_ALL ((uint_t)-1)
413 #define PR_ALL if (idn_debug) printf
414 #define PR_XDC if (idn_debug & IDNDBG_XDC) printf
415 #define PR_XF if (idn_debug & IDNDBG_XF) printf
416 #define PR_REGS if (idn_debug & IDNDBG_REGS) printf
417 #define PR_SMR if (idn_debug & IDNDBG_SMR) printf
418 #define PR_PROTO if (idn_debug & IDNDBG_PROTO) printf
419 #define PR_STR if (idn_debug & IDNDBG_STR) printf
420 #define PR_DRV if (idn_debug & IDNDBG_DRV) printf
421 #define PR_DATA if (idn_debug & IDNDBG_DATA) printf
422 #define PR_STATE if (idn_debug & IDNDBG_STATE) printf
423 #define PR_DLPI if (idn_debug & IDNDBG_DLPI) printf
424 #define PR_KERN if (idn_debug & IDNDBG_KERN) printf
425 #define PR_ALLOC if (idn_debug & IDNDBG_ALLOC) printf
426 #define PR_REMAP if (idn_debug & (IDNDBG_SMR|IDNDBG_REMAP)) printf
427 #define PR_TIMER if (idn_debug & IDNDBG_TIMER) printf
428 #define PR_CHAN if (idn_debug & IDNDBG_CHAN) printf
429 #define PR_AWOL if (idn_debug & (IDNDBG_PROTO|IDNDBG_AWOL)) printf
430 #define PR_SYNC if (idn_debug & IDNDBG_SYNC) printf
431 #define _PR_UNUSED0 if (idn_debug & _IDNDBG_UNUSED0) printf
432 #define PR_HITLIST if (idn_debug & IDNDBG_HITLIST) printf
433 #define PR_XMON if (idn_debug & IDNDBG_XMON) printf
434 #define PR_TEST if (idn_debug & IDNDBG_TEST) printf
435 #else
436 #define STRING(sss) char *sss = ""
437 #define INUM2STR(mm, ss)
439 #define PR_ALL if (0) printf
440 #define PR_XDC PR_ALL
441 #define PR_XF PR_ALL
442 #define PR_REGS PR_ALL
443 #define PR_SMR PR_ALL
444 #define PR_PROTO PR_ALL
445 #define PR_STR PR_ALL
446 #define PR_DRV PR_ALL
447 #define PR_DATA PR_ALL
448 #define PR_STATE PR_ALL
449 #define PR_DLPI PR_ALL
450 #define PR_KERN PR_ALL
451 #define PR_ALLOC PR_ALL
452 #define PR_REMAP PR_ALL
453 #define PR_TIMER PR_ALL
454 #define PR_CHAN PR_ALL
455 #define PR_AWOL PR_ALL
456 #define PR_SYNC PR_ALL
457 #define PR_SNOOP PR_ALL
458 #define PR_HITLIST PR_ALL
459 #define PR_XMON PR_ALL
460 #define PR_TEST PR_ALL
461 #endif /* DEBUG */
463 #ifdef _KERNEL
465 * IDN drivers fields.
467 * IDNMINPSZ Minimum packet size the IDN supports.
469 * IDNMAXPSZ Maximum packet size that IDN supports from upper
470 * layers. It is equal to IDN_MTU + ether_header. Note
471 * that the IDN driver could support larger packets
472 * however the infrastructure to support fragmentation
473 * does not (and should not) exist with respect to
474 * ethernet packet types.
476 #ifdef DEBUG
477 #define IDNDESC "Inter-Domain Network (DEBUG)"
478 #else
479 #define IDNDESC "Inter-Domain Network"
480 #endif /* DEBUG */
482 #define IDNIDNUM 8264
483 #define IDNNAME "idn"
484 #define IDNMINPSZ 0 /* set at idnopen() */
485 #define IDNMAXPSZ 0 /* set at idnopen() */
487 #endif /* _KERNEL */
490 * IDN Global States.
492 typedef enum {
493 /* 0 */ IDNGS_OFFLINE = 0, /* idle */
494 /* 1 */ IDNGS_CONNECT, /* connecting initial domain */
495 /* 2 */ IDNGS_ONLINE, /* master selected */
496 /* 3 */ IDNGS_DISCONNECT, /* local is unlinking */
497 /* 4 */ IDNGS_RECONFIG, /* selecting new master */
498 /* 5 */ _IDNGS_UNUNSED5,
499 /* 6 */ _IDNGS_UNUNSED6,
500 /* 7 */ _IDNGS_UNUNSED7,
501 /* 8 */ _IDNGS_UNUNSED8,
502 /* 9 */ _IDNGS_UNUNSED9,
503 /* 10 */ IDNGS_IGNORE /* ignore requests (fault injection) */
504 } idn_gstate_t;
506 #ifdef _KERNEL
508 #define TIMESTAMP() (gethrtime() / 1000000ull)
511 * Space defined in:
512 * sigblkp[cpu0.cpu_id]->sigb_idn.reserved1.
514 #define IDNSB_GSTATE_NEW 0
515 #define IDNSB_GSTATE_OLD 1
516 #define IDNSB_MASTERCPU 2
517 #define IDNSB_RESERVED 3
519 #define IDNSB_HWCHKPT_MAX 4
521 #define IDNSB_SIZE 72
523 * This structure gets overlaid onto:
524 * sigblkp[cpu0.cpu_id]->sigb_idn.reserved1.
526 * This structure must be exactly IDNSB_SIZE bytes.
528 typedef struct idnsb {
529 uchar_t id_gstate;
530 uchar_t id_pgstate;
531 uchar_t id_master_board;
532 uchar_t id_pmaster_board;
534 uchar_t reserved_DO_NOT_USE[24]; /* idnevent_t field */
536 struct {
537 uchar_t d_board;
538 uchar_t d_state;
539 } id_status[MAX_DOMAINS];
540 uint_t id_hwstate;
541 ushort_t id_hwchkpt[IDNSB_HWCHKPT_MAX];
542 } idnsb_t; /* sizeof = 72 (0x48) 18X bytes */
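/*
 * Worked size check (not part of the original header), assuming
 * MAX_DOMAINS is 16 as the 16-bit domainset_t above implies:
 *
 *	4 x uchar_t gstate/board fields		  4 bytes
 *	reserved_DO_NOT_USE[24]			 24 bytes
 *	id_status[16] x 2 uchar_t		 32 bytes
 *	id_hwstate (uint_t)			  4 bytes
 *	id_hwchkpt[4] x ushort_t		  8 bytes
 *						 --------
 *						 72 bytes == IDNSB_SIZE
 */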
545 #define IDNSB_DOMAIN_UPDATE(dp) \
547 mutex_enter(&idn.idnsb_mutex); \
548 if (idn.idnsb) { \
549 int domid = (dp)->domid; \
550 if ((dp)->dcpu == IDN_NIL_DCPU) \
551 idn.idnsb->id_status[domid].d_board = \
552 (uchar_t)0xff; \
553 else if ((dp)->dvote.v.board == 0) \
554 idn.idnsb->id_status[domid].d_board = \
555 (uchar_t)CPUID_TO_BOARDID((dp)->dcpu); \
556 else \
557 idn.idnsb->id_status[domid].d_board = \
558 (uchar_t)(dp)->dvote.v.board; \
559 idn.idnsb->id_status[domid].d_state = \
560 (uchar_t)(dp)->dstate; \
562 mutex_exit(&idn.idnsb_mutex); \
565 * The following definitions and macros pertain to the
566 * id_hwstate and id_hwchkpt[] fields.
568 * id_hwstate (m = mark: 1=open, 2=close)
569 * 0 1 2 3 4 5 6 7
570 * ---------------------------------
571 * | m | m | m | m | XX unused XXX |
572 * ---------------------------------
573 * | | | |
574 * | | | +- CACHE
575 * | | +- CHAN
576 * | +- LINK
577 * +- SMR
579 * Note that nibble 4 is used in DEBUG for noting cache
580 * flush progress through idnxf_flushall_ecache(). This
581 * will override id_hwchkpt[] since it only has room for
582 * 4 items, however the BBSRAM space is there and
583 * unofficially available :-o
585 * id_hwchkpt[0] = SMR boardset
586 * id_hwchkpt[1] = LINK boardset
587 * id_hwchkpt[2] = CHAN boardset
588 * id_hwchkpt[3] = CACHE boardset.
590 #define IDNSB_CHKPT_SMR 0
591 #define IDNSB_CHKPT_LINK 1
592 #define IDNSB_CHKPT_CHAN 2
593 #define IDNSB_CHKPT_CACHE 3
594 #define IDNSB_CHKPT_UNUSED 4 /* This is the max you can have */
596 #define _CHKPT_MARKIT(item, mark) \
598 uint_t mk = (((uint_t)((mark) & 0xf)) << \
599 (((sizeof (uint_t) << 1) - 1 - (item)) << 2)); \
600 uint_t *sp = &idn.idnsb->id_hwstate; \
601 ASSERT(idn.idnsb); \
602 *sp &= ~(((uint_t)0xf) << (((sizeof (uint_t) << 1) \
603 - 1 - (item)) << 2)); \
604 *sp |= mk; \
607 #define CHECKPOINT_OPENED(item, bset, mark) \
609 mutex_enter(&idn.idnsb_mutex); \
610 if (idn.idnsb) { \
611 ushort_t *sp = &idn.idnsb->id_hwchkpt[0]; \
612 _CHKPT_MARKIT((item), (mark)); \
613 sp[item] |= ((ushort_t)(bset)); \
615 mutex_exit(&idn.idnsb_mutex); \
618 #define CHECKPOINT_CLOSED(item, bset, mark) \
620 mutex_enter(&idn.idnsb_mutex); \
621 if (idn.idnsb) { \
622 ushort_t *sp = &idn.idnsb->id_hwchkpt[0]; \
623 _CHKPT_MARKIT((item), (mark)); \
624 sp[item] &= (ushort_t)~(bset); \
626 mutex_exit(&idn.idnsb_mutex); \
629 #define CHECKPOINT_CLEAR(item, mark) \
631 mutex_enter(&idn.idnsb_mutex); \
632 if (idn.idnsb) { \
633 ushort_t *sp = &idn.idnsb->id_hwchkpt[0]; \
634 _CHKPT_MARKIT((item), (mark)); \
635 sp[item] = 0; \
637 mutex_exit(&idn.idnsb_mutex); \
639 #ifdef DEBUG
640 #define CHECKPOINT_CACHE_CLEAR_DEBUG(mark) \
641 CHECKPOINT_CLEAR(IDNSB_CHKPT_UNUSED, (mark))
642 #define CHECKPOINT_CACHE_STEP_DEBUG(bset, mark) \
643 CHECKPOINT_OPENED(IDNSB_CHKPT_UNUSED, (bset), (mark))
644 #else
645 #define CHECKPOINT_CACHE_CLEAR_DEBUG(mark)
646 #define CHECKPOINT_CACHE_STEP_DEBUG(bset, mark)
647 #endif /* DEBUG */
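/*
 * Worked example (not part of the original header):
 * CHECKPOINT_OPENED(IDNSB_CHKPT_LINK, BOARDSET(2), 1) stamps mark 1
 * ("open") into the LINK nibble of id_hwstate -- item 1 shifts by
 * ((8 - 1 - 1) << 2) == 24, i.e. bits 27:24 -- and ORs board 2 into
 * id_hwchkpt[IDNSB_CHKPT_LINK].  A later CHECKPOINT_CLOSED with mark 2
 * clears that board from the boardset and stamps the nibble with 2.
 */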
650 #ifdef DEBUG
651 #define IDN_GSTATE_TRANSITION(ns) \
653 hrtime_t tstamp; \
654 /*LINTED*/ \
655 IDN_HISTORY_LOG(IDNH_GSTATE, (ns), 0, 0); \
656 tstamp = TIMESTAMP(); \
657 ASSERT(IDN_GLOCK_IS_EXCL()); \
658 PR_STATE("GSTATE:%ld: (l=%d) %s(%d) -> %s(%d)\n", \
659 (uint64_t)tstamp, __LINE__, \
660 idngs_str[idn.state], idn.state, \
661 idngs_str[ns], (ns)); \
662 mutex_enter(&idn.idnsb_mutex); \
663 if (idn.idnsb) { \
664 idn.idnsb->id_pgstate = (uchar_t)idn.state; \
665 idn.idnsb->id_gstate = (uchar_t)(ns); \
667 mutex_exit(&idn.idnsb_mutex); \
668 idn.state = (ns); \
670 #else
671 #define IDN_GSTATE_TRANSITION(ns) \
673 IDN_HISTORY_LOG(IDNH_GSTATE, (ns), 0, 0); \
674 mutex_enter(&idn.idnsb_mutex); \
675 if (idn.idnsb) { \
676 idn.idnsb->id_pgstate = (uchar_t)idn.state; \
677 idn.idnsb->id_gstate = (uchar_t)(ns); \
679 mutex_exit(&idn.idnsb_mutex); \
680 idn.state = (ns); \
682 #endif /* DEBUG */
685 * IDN link/unlink operations occur asynchronously with respect to the
686 * caller. The following definitions are to support the return of
687 * success/failure back to the original requesting thread. It's
688 * unlikely to have multiple outstanding link/unlink requests so we
689 * just provide a very small cache of waiting list entries. If the
690 * cache becomes exhausted then additional ones are kmem_alloc'd.
692 #define IDNOP_CACHE_SIZE 3
693 #define IDNOP_IN_CACHE(dwl) \
694 (((dwl) >= &idn.dopers->_dop_wcache[0]) && \
695 ((dwl) < &idn.dopers->_dop_wcache[IDNOP_CACHE_SIZE]))
697 typedef struct dop_waitlist {
698 struct dop_waitlist *dw_next;
699 domainset_t dw_reqset;
700 domainset_t dw_domset;
701 short dw_op;
702 domainset_t dw_errset;
703 idnsb_error_t *dw_idnerr;
704 short dw_errors[MAX_DOMAINS];
705 } dop_waitlist_t;
707 typedef uint_t idn_xdcargs_t[4];
708 typedef uint_t idn_chanset_t;
711 * Types of synchronization zones which a connection
712 * could be in.
714 typedef enum {
715 IDNSYNC_NIL,
716 IDNSYNC_CONNECT,
717 IDNSYNC_DISCONNECT
718 } idn_synccmd_t;
721 * Type of sync-registration that is being requested.
723 typedef enum {
724 IDNSYNC_REG_REG,
725 IDNSYNC_REG_NEW,
726 IDNSYNC_REG_QUERY
727 } idn_syncreg_t;
729 #define IDN_SYNC_NUMZONE 3
730 #define IDN_SYNC_GETZONE(s) ((((s) != IDNSYNC_CONNECT) && \
731 ((s) != IDNSYNC_DISCONNECT)) ? \
732 -1 : (int)(s) - 1)
733 #define IDN_SYNC_GETTRANS(s) (((s) == IDNSYNC_CONNECT) ? \
734 idn.domset.ds_trans_on : \
735 ((s) == IDNSYNC_DISCONNECT) ? \
736 idn.domset.ds_trans_off : 0)
739 * Generic states when in a state transition region.
740 * These ultimately map to domain states via
741 * an idn_xphase_t definition. General model:
743 * PEND
744 * /\
745 * / \
746 * | |
747 * V V
748 * SENT--->RCVD
749 * \ /
750 * \ /
751 * VV
752 * FINAL
754 * Start these types with PEND = 0 so that they're
755 * compatible with idnxs_state_table[] and idn_xphase_t
756 * phases that use the value as an index.
758 typedef enum {
759 /* 0 */ IDNXS_PEND = 0,
760 /* 1 */ IDNXS_SENT,
761 /* 2 */ IDNXS_RCVD,
762 /* 3 */ IDNXS_FINAL,
763 /* 4 */ IDNXS_NIL
764 } idn_xstate_t;
767 * Locking protocol:
769 * Each routine is called with SYNC_LOCK and
770 * the respective domain's DLOCK(EXCL) held.
771 * The routines must return with these locks
772 * still held.
774 struct idn_msgtype;
776 typedef struct {
777 int t_state;
778 int (*t_check)(int domid, struct idn_msgtype *mtp,
779 idn_xdcargs_t xargs);
780 void (*t_action)(int domid, struct idn_msgtype *mtp,
781 idn_xdcargs_t xargs);
782 void (*t_error)(int domid, struct idn_msgtype *mtp,
783 idn_xdcargs_t xargs);
784 } idn_trans_t;
787 * The callback routines (xt_final & xt_exit) are called with
788 * DLOCK and SYNC_LOCK held and they are required to return
789 * with these locks still held.
791 typedef struct {
792 uint_t xt_msgtype;
793 idn_trans_t xt_trans[4];
794 void (*xt_final)(int domid);
795 void (*xt_exit)(int domid, uint_t msgtype);
796 } idn_xphase_t;
799 * Synchronization entry representing the synchronization
800 * state with respect to a given domain for a given zone.
802 typedef struct idn_syncop {
803 struct idn_syncop *s_next;
804 int s_domid;
805 idn_synccmd_t s_cmd;
806 int s_msg;
808 domainset_t s_set_exp;
809 domainset_t s_set_rdy;
810 int (*s_transfunc)(int domid, void *arg);
811 void *s_transarg;
812 #ifdef DEBUG
813 int s_query[MAX_DOMAINS];
814 #endif /* DEBUG */
815 } idn_syncop_t;
817 #ifdef DEBUG
818 #define IDN_SYNC_QUERY_INIT(d) \
819 (bzero((caddr_t)idn_domain[d].dsync.s_query, \
820 sizeof (idn_domain[d].dsync.s_query)))
821 #define IDN_SYNC_QUERY_UPDATE(md, sd) (idn_domain[md].dsync.s_query[sd]++)
822 #else /* DEBUG */
823 #define IDN_SYNC_QUERY_INIT(d)
824 #define IDN_SYNC_QUERY_UPDATE(md, sd)
825 #endif /* DEBUG */
827 typedef struct {
828 idn_syncop_t *sc_op;
829 int sc_cnt;
830 } idn_synczone_t;
832 #endif /* _KERNEL */
835 * Vote Ticket used during negotiations and elections.
837 * 31 0
838 * -----------------------------------------
839 * |m...|....|pppp|....|Cbbb|bccc|cccB|BBB1|
840 * -----------------------------------------
841 * m [31] = master/slave
842 * . [30:24] = unused
843 * p [23:20] = priority
844 * . [19:16] = unused
845 * C [15] = connected (has master)
846 * b [14:11] = nmembrds-1
847 * c [10:5] = ncpus-1
848 * B [4:1] = board_id
849 * 1 [0] = one
851 typedef union {
852 struct {
853 uint_t master :1;
854 uint_t unused0 :7;
855 uint_t priority :4;
856 uint_t unused1 :4;
857 uint_t connected :1;
858 uint_t nmembrds :4;
859 uint_t ncpus :6;
860 uint_t board :4;
861 uint_t one :1;
862 } v;
863 uint_t ticket;
864 } idn_vote_t;
866 #define IDNVOTE_PRI_MASK 0xf
867 #define IDNVOTE_MAXPRI 0xf
868 #define IDNVOTE_MINPRI 0
869 #define IDNVOTE_DEFPRI 1 /* must be larger than MINPRI */
871 * Initially:
872 * vote.v.priority = IDNVOTE_DEFPRI
873 * vote.v.one = 1
875 #define IDNVOTE_INITIAL_TICKET ((IDNVOTE_DEFPRI << 20) | 1)
876 #define IDNVOTE_PRIVALUE(vvv) \
877 ((int)vvv.v.priority + ((int)vvv.v.master ? IDNVOTE_MAXPRI : 0))
880 * During elections we only use the "elect" attributes of the
881 * election ticket, i.e. those physical attributes pertaining
882 * to the individual domain (priority, nboards, ncpus, board).
884 #define IDNVOTE_ELECT_MASK 0x00f07fff
885 #define IDNVOTE_ELECT(tkt) ((tkt).ticket & IDNVOTE_ELECT_MASK)
886 #define IDNVOTE_BASICS_MASK 0x00f0ffff
887 #define IDNVOTE_BASICS(tkt) ((tkt).ticket & IDNVOTE_BASICS_MASK)
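/*
 * Worked example (not part of the original header): IDNVOTE_INITIAL_TICKET
 * is (IDNVOTE_DEFPRI << 20) | 1 == 0x00100001, i.e. priority 1 in bits
 * 23:20 plus the mandatory "one" bit.  IDNVOTE_PRIVALUE() of that ticket
 * is 1 for a slave and 1 + IDNVOTE_MAXPRI == 16 once the master bit is
 * set, which weights a sitting master above the normal slave priority
 * range.  IDNVOTE_ELECT() keeps only the priority nibble and the
 * nmembrds/ncpus/board_id/one fields (bits 23:20 and 14:0), dropping the
 * master and connected bits.
 */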
890 * Values used in idn_select_master().
892 #define MASTER_IS_NONE 0 /* index into master_select_table */
893 #define MASTER_IS_OTHER 1
894 #define MASTER_IS_LOCAL 2
895 #define MASTER_IS_REMOTE 3
897 typedef enum {
898 MASTER_SELECT_VOTE,
899 MASTER_SELECT_VOTE_RCFG,
900 MASTER_SELECT_CONNECT,
901 MASTER_SELECT_REMOTE,
902 MASTER_SELECT_LOCAL,
903 MASTER_SELECT_WAIT,
904 MASTER_SELECT_ERROR
905 } idn_master_select_t;
908 * Used to synchronize completion of link/unlink with respect to
909 * the original requester (user). Necessary since link establishment
910 * occurs asynchronously.
912 typedef enum {
913 /* 0 */ IDNOP_DISCONNECTED, /* successfully disconnected */
914 /* 1 */ IDNOP_CONNECTED, /* successfully established */
915 /* 2 */ IDNOP_ERROR /* error trying to link/unlink */
916 } idn_opflag_t;
919 * IDN Protocol Messages.
920 * These are IDN version (IDN_VERSION) dependent.
922 * ----- 7, --- 6,5.................0
923 * | ack | nack | IDN message type |
924 * ----------------------------------
926 #define IDN_VERSION 1
929 * Must be no more than 6-bits. See DMV private data.
931 #define IDNP_ACK 0x20
932 #define IDNP_NACK 0x10
933 #define IDNP_NULL 0x00
934 #define IDNP_NEGO 0x01
935 #define IDNP_CON 0x02
936 #define IDNP_CFG 0x03
937 #define IDNP_FIN 0x04
938 #define IDNP_CMD 0x05
939 #define IDNP_DATA 0x06
941 #define IDN_NUM_MSGTYPES 7
942 #define IDNP_ACKNACK_MASK (IDNP_ACK | IDNP_NACK)
943 #define IDNP_MSGTYPE_MASK 0x0f
944 #define VALID_MSGTYPE(m) (((m) >= IDNP_NEGO) && ((m) < IDN_NUM_MSGTYPES))
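/*
 * Worked example (not part of the original header): a NEGO acknowledgment
 * carries msgtype IDNP_NEGO | IDNP_ACK == 0x21.  Masking with
 * IDNP_ACKNACK_MASK recovers the ack/nack bits (0x20, i.e. IDNP_ACK) and
 * masking with IDNP_MSGTYPE_MASK recovers the base type (IDNP_NEGO),
 * which is what VALID_MSGTYPE() range-checks.
 */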
946 typedef struct idn_msgtype {
947 ushort_t mt_mtype;
948 ushort_t mt_atype;
949 ushort_t mt_cookie;
950 } idn_msgtype_t;
953 * IDN private data section of DMV layout (48 bits).
955 * 47......40,39.....34,33.....28,27..24,23......16,15..............0
956 * | version | msgtype | acktype | did | cpuid | cookie |
957 * ------------------------------------------------------------------
959 * version Local domain's version of IDN software.
960 * msgtype Type of IDN message, e.g. nego, syn, etc.
961 * acktype If msgtype is an ACK or NACK, then acktype is the
962 * type of ack that we're receiving, e.g. ack/nego|ack.
963 * did Local domain's ID (netid) - system-wide unique.
964 * cpuid Local domain's CPU->cpu_id that is sending the message.
965 * cookie Cookie assigned by remote domain for authentication.
966 * For NEGO & NEGO+ACK messages, it's the cookie that
967 * the sender expects the receiver to use in subsequent
968 * messages. The upper-eight bits represent a timer
969 * cookie to associate timers with expected messages.
971 #endif /* !_ASM */
973 #ifdef _KERNEL
975 #define _IDNPD_COOKIE_MASK 0xffff
976 #define _IDNPD_COOKIE_SHIFT 32
977 #define _IDNPD_VER_MASK 0xff
978 #define _IDNPD_VER_SHIFT 24
979 #define _IDNPD_MTYPE_MASK 0x3f
980 #define _IDNPD_MTYPE_SHIFT 18
981 #define _IDNPD_ATYPE_MASK 0x3f
982 #define _IDNPD_ATYPE_SHIFT 12
983 #define _IDNPD_DOMID_MASK 0xf
984 #define _IDNPD_DOMID_SHIFT 8
985 #define _IDNPD_CPUID_MASK 0xff
986 #define _IDNPD_CPUID_SHIFT 0
988 #define _IDNPD_COOKIE_LEN 16
990 #ifndef _ASM
992 #define IDN_PD2COOKIE(pdata) \
993 (((uint_t)((pdata) >> _IDNPD_COOKIE_SHIFT)) & _IDNPD_COOKIE_MASK)
994 #define IDN_PD2VER(pdata) \
995 (((uint_t)((pdata) >> _IDNPD_VER_SHIFT)) & _IDNPD_VER_MASK)
996 #define IDN_PD2MTYPE(pdata) \
997 (((uint_t)((pdata) >> _IDNPD_MTYPE_SHIFT)) & _IDNPD_MTYPE_MASK)
998 #define IDN_PD2ATYPE(pdata) \
999 (((uint_t)((pdata) >> _IDNPD_ATYPE_SHIFT)) & _IDNPD_ATYPE_MASK)
1000 #define IDN_PD2DOMID(pdata) \
1001 (((uint_t)((pdata) >> _IDNPD_DOMID_SHIFT)) & _IDNPD_DOMID_MASK)
1002 #define IDN_PD2CPUID(pdata) \
1003 (((uint_t)((pdata) >> _IDNPD_CPUID_SHIFT)) & _IDNPD_CPUID_MASK)
1005 #define IDN_MAKE_PDATA(mtype, atype, cookie) \
1006 ((((uint64_t)(cookie) & UINT64_C(_IDNPD_COOKIE_MASK)) << \
1007 _IDNPD_COOKIE_SHIFT) | \
1008 (((uint64_t)idn.version & UINT64_C(_IDNPD_VER_MASK)) << \
1009 _IDNPD_VER_SHIFT) | \
1010 (((uint64_t)(mtype) & UINT64_C(_IDNPD_MTYPE_MASK)) << \
1011 _IDNPD_MTYPE_SHIFT) | \
1012 (((uint64_t)(atype) & UINT64_C(_IDNPD_ATYPE_MASK)) << \
1013 _IDNPD_ATYPE_SHIFT) | \
1014 (((uint64_t)idn.localid & UINT64_C(_IDNPD_DOMID_MASK)) << \
1015 _IDNPD_DOMID_SHIFT) | \
1016 (((uint64_t)CPU->cpu_id & UINT64_C(_IDNPD_CPUID_MASK)) << \
1017 _IDNPD_CPUID_SHIFT))
1019 #define IDN_TCOOKIE(ck) (((ck) >> 8) & 0xff)
1020 #define IDN_DCOOKIE(ck) ((ck) & 0xff)
1021 #define IDN_MAKE_COOKIE(d, t) ((((t) & 0xff) << 8) | ((d) & 0xff))
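/*
 * Worked example (not part of the original header): IDN_MAKE_COOKIE(3, 0x2a)
 * packs domain 3 and timer cookie 0x2a into 0x2a03; IDN_TCOOKIE() and
 * IDN_DCOOKIE() recover 0x2a and 3 respectively.  The same 16-bit value is
 * what IDN_MAKE_PDATA() folds into the cookie field of the DMV private
 * data and IDN_PD2COOKIE() extracts on the receiving side.
 */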
1024 * IDNP_NEGO
1026 * 127........96,95........64,63........32,31.........0
1027 * | vote | domainset |
1028 * ----------------------------------------------------
1029 * vote Local/Remote domain's vote ticket.
1030 * domainset Mask of cpuids of domains to which
1031 * sender is connected. Position in domainset
1032 * designates respective domainid.
1033 * E.g. domainset[6] = 20 -> domainid 6 is
1034 * accessible via cpuid 20.
1035 * The slot for the receiving domain
1036 * contains the masterid of the sending
1037 * domain. If the sending domain does
1038 * not have a master then the entry will
1039 * contain IDNNEG_NO_MASTER.
1041 * These macros insert a domainid-cpuid pair into the
1042 * domainset to be subsequently passed in a NEGO message,
1043 * and also retrieve the cpuid from the domainset for a
1044 * given domainid.
1046 * Usage:
1047 * Sending:
1048 * mask = IDNNEG_DSET_MYMASK();
1049 * IDNNEG_DSET_INIT(dset, mask)
1050 * for (all domains except self)
1051 * IDNNEG_DSET_SET(dset, domain, cpuid, mask);
1053 * Receiving:
1054 * IDNNEG_DSET_GET_MASK(dset, recv_domid, recv_mask);
1055 * for (all domains except recv_domid)
1056 * IDNNEG_DSET_GET(dset, domid, cpuid, recv_mask);
1058 typedef uint_t idnneg_dset_t[3];
1060 #define IDNNEG_NO_MASTER 0x3f
1061 #define __IDNNEG_DSET_CLEAR(dset) (bzero((caddr_t)(dset), \
1062 sizeof (idnneg_dset_t)))
1063 #define IDNNEG_DSET_MYMASK() (idn_domain[idn.localid].dcpu)
1065 #define IDNNEG_DSET_INIT(dset, mask) \
1067 __IDNNEG_DSET_CLEAR(dset); \
1068 IDNNEG_DSET_SET((dset), idn.localid, (mask), idn.localid); \
1071 #define IDNNEG_DSET_SET(dset, domid, cpuid, mask) \
1073 uint_t _s = ((domid) & 0xf) * 6; \
1074 int _i = _s >> 5; \
1075 uint_t _s0 = _s & 0x1f; \
1076 uint_t _t = ((cpuid) ^ (mask)) & 0x3f; \
1077 /*LINTED*/ \
1078 ASSERT(((domid) == idn.localid) ? \
1079 ((mask) == idn.localid) : ((cpuid) != (mask))); \
1080 (dset)[_i] |= _t << _s0; \
1081 if ((_s0 + 6) > 32) \
1082 (dset)[_i + 1] |= _t >> (32 - _s0); \
1085 #define __IDNNEG_DSET_GET(dset, domid, cpuid, mask, uncond) \
1087 uint_t _s = ((domid) & 0xf) * 6; \
1088 int _i = _s >> 5; \
1089 uint_t _s0 = _s & 0x1f; \
1090 uint_t _s1 = (_s + 6) & 0x1f; \
1091 (cpuid) = ((dset)[_i] >> _s0) & 0x3f; \
1092 if ((_s0 + 6) > 32) \
1093 (cpuid) |= ((dset)[_i + 1] << (6 - _s1)) & 0x3f; \
1094 if ((cpuid) || (uncond)) \
1095 (cpuid) ^= (mask) & 0x3f; \
1096 else \
1097 (cpuid) = -1; \
1100 #define IDNNEG_DSET_GET_MASK(dset, domid, mask) \
1101 __IDNNEG_DSET_GET((dset), (domid), (mask), (domid), 1)
1103 #define IDNNEG_DSET_GET_MASTER(dset, master) \
1104 __IDNNEG_DSET_GET((dset), idn.localid, (master), \
1105 idn.localid+MAX_DOMAINS, 0)
1107 #define IDNNEG_DSET_SET_MASTER(dset, domid, master) \
1108 IDNNEG_DSET_SET((dset), (domid), (master), \
1109 (domid)+MAX_DOMAINS)
1111 #define IDNNEG_DSET_GET(dset, domid, cpuid, mask) \
1112 __IDNNEG_DSET_GET((dset), (domid), (cpuid), (mask), 0)
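/*
 * Illustrative sketch (not part of the original header) of the sending
 * side of the documented usage above; dset, mask, d and cpu are
 * hypothetical locals and the block is never compiled.  The receiver
 * mirrors this with IDNNEG_DSET_GET_MASK()/IDNNEG_DSET_GET().
 */
#if 0
	idnneg_dset_t	dset;
	uint_t		mask;
	int		d, cpu;

	mask = IDNNEG_DSET_MYMASK();
	IDNNEG_DSET_INIT(dset, mask);
	for (d = 0; d < MAX_DOMAINS; d++) {
		if ((d == idn.localid) ||
		    ((cpu = idn_domain[d].dcpu) == IDN_NIL_DCPU))
			continue;
		IDNNEG_DSET_SET(dset, d, cpu, mask);
	}
#endif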
1115 * IDNP_CFG sub-types.
1117 * Format of first 32 bit word in XDC:
1118 * stX = sub-type.
1119 * staX = sub-type arg.
1120 * X = position in idn_cfgsubtype_t.param.p[] array.
1121 * num = number of parameters in this XDC (0-3)
1123 * 31...28,27...24,23...20,19...16,15...12,11....8,7.....3,2....0
1124 * | st0 . sta0 | st1 . sta1 | st2 . sta2 | phase | num |
1125 * --------------------------------------------------------------
1127 * Note that since the first 32-bit word in an (IDNP_CFG) XDC is used
1128 * for the sub-types, the subsequent three 32-bit words carry the data
1129 * that pertains to the respective sub-types, i.e. the first sub-type
1130 * corresponds to the first of the 3x32-bit words (pos=0), the second
1131 * sub-type corresponds to the second of the 3x32-bit words (pos=1),
1132 * etc. Obviously, a maximum of only three sub-types can be sent per XDC.
1134 #define IDNCFG_BARLAR 0x1 /* SMR base/limit pfn */
1135 #define IDNCFGARG_BARLAR_BAR 0 /* BAR */
1136 #define IDNCFGARG_BARLAR_LAR 1 /* LAR */
1137 #define IDNCFG_MCADR 0x2 /* MC ADR, arg = board number */
1138 #define IDNCFG_NMCADR 0x3 /* Number of MC ADRs to expect */
1139 #define IDNCFG_CPUSET 0x4 /* dcpuset of remote domain */
1140 #define IDNCFGARG_CPUSET_UPPER 0 /* 1st word */
1141 #define IDNCFGARG_CPUSET_LOWER 1 /* 2nd word */
1142 #define IDNCFG_NETID 0x5 /* dnetid, arg = 0 */
1143 #define IDNCFG_BOARDSET 0x6 /* board set, arg = 0 */
1144 #define IDNCFG_SIZE 0x7 /* SMR size parameters */
1145 #define IDNCFGARG_SIZE_MTU 0 /* IDN_MTU */
1146 #define IDNCFGARG_SIZE_BUF 1 /* IDN_SMR_BUFSIZE */
1147 #define IDNCFGARG_SIZE_SLAB 2 /* IDN_SLAB_BUFCOUNT */
1148 #define IDNCFGARG_SIZE_NWR 3 /* IDN_NWR_SIZE */
1149 #define IDNCFG_DATAMBOX 0x8 /* SMR data mailbox info */
1150 #define IDNCFGARG_DATAMBOX_TABLE 0 /* recvmbox table */
1151 #define IDNCFGARG_DATAMBOX_DOMAIN 1 /* domain's recvmbox */
1152 #define IDNCFGARG_DATAMBOX_INDEX 2 /* domain's index into table */
1153 #define IDNCFG_DATASVR 0x9 /* Data server info */
1154 #define IDNCFGARG_DATASVR_MAXNETS 0 /* max # of nets/channels */
1155 #define IDNCFGARG_DATASVR_MBXPERNET 1 /* # mbox per net/channel */
1156 #define IDNCFG_OPTIONS 0xa /* various options */
1157 #define IDNCFGARG_CHECKSUM 0 /* IDN_CHECKSUM */
1159 #define IDN_CFGPARAM(st, sta) ((uchar_t)((((st) & 0xf) << 4) | ((sta) & 0xf)))
1160 #define IDN_CFGPARAM_TYPE(p) (((p) >> 4) & 0xf)
1161 #define IDN_CFGPARAM_ARG(p) ((p) & 0xf)
1163 typedef union {
1164 struct {
1165 uchar_t p[3];
1166 uchar_t _num_phase; /* info.num, info.phase used instead */
1167 } param;
1168 struct {
1169 uint_t _p : 24; /* param.p[] used instead */
1170 uint_t num : 2;
1171 uint_t phase : 6;
1172 } info;
1173 uint_t val;
1174 } idn_cfgsubtype_t;
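/*
 * Worked example (not part of the original header):
 * IDN_CFGPARAM(IDNCFG_SIZE, IDNCFGARG_SIZE_MTU) == 0x70, from which
 * IDN_CFGPARAM_TYPE() and IDN_CFGPARAM_ARG() recover 0x7 and 0.  Up to
 * three such sub-type bytes ride in param.p[0..2] of the first word, with
 * info.num saying how many are valid and the remaining three words of the
 * XDC carrying the corresponding data.
 */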
1177 * IDN_MASTER_NCFGITEMS
1178 * Minimum number of config items expected from master.
1180 * IDN_SLAVE_NCFGITEMS
1181 * Number of config items expected from slave.
1183 #define IDN_MASTER_NCFGITEMS 17 /* max = +14 (mcadrs) */
1184 #define IDN_SLAVE_NCFGITEMS 12
1187 * IDNP_CMD sub-types.
1189 typedef enum {
1190 /* 1 */ IDNCMD_SLABALLOC = 1, /* Request to allocate a slab */
1191 /* 2 */ IDNCMD_SLABFREE, /* Request to free a slab */
1192 /* 3 */ IDNCMD_SLABREAP, /* Reap any free slabs */
1193 /* 4 */ IDNCMD_NODENAME /* Query nodename of domain */
1194 } idn_cmd_t;
1196 #define VALID_IDNCMD(c) (((int)(c) >= (int)IDNCMD_SLABALLOC) && \
1197 ((int)(c) <= (int)IDNCMD_NODENAME))
1199 * IDNP_NACK
1201 typedef enum {
1202 /* 1 */ IDNNACK_NOCONN = 1,
1203 /* 2 */ IDNNACK_BADCHAN,
1204 /* 3 */ IDNNACK_BADCFG,
1205 /* 4 */ IDNNACK_BADCMD,
1206 /* 5 */ IDNNACK_RETRY,
1207 /* 6 */ IDNNACK_DUP,
1208 /* 7 */ IDNNACK_EXIT,
1209 /* 8 */ IDNNACK_RESERVED1,
1210 /* 9 */ IDNNACK_RESERVED2,
1211 /* 10 */ IDNNACK_RESERVED3
1212 } idn_nack_t;
1215 * IDNP_CON sub-types.
1217 typedef enum {
1218 /* 0 */ IDNCON_OFF = 0,
1219 /* 1 */ IDNCON_NORMAL, /* regular connect sequence */
1220 /* 2 */ IDNCON_QUERY /* query for connect info */
1221 } idn_con_t;
1224 * IDNP_FIN sub-types.
1226 typedef enum {
1227 /* 0 */ IDNFIN_OFF = 0, /* active, no fin */
1228 /* 1 */ IDNFIN_NORMAL, /* normal disconnect req */
1229 /* 2 */ IDNFIN_FORCE_SOFT, /* normal dis, force if goes AWOL */
1230 /* 3 */ IDNFIN_FORCE_HARD, /* force disconnect of AWOL domain */
1231 /* 4 */ IDNFIN_QUERY /* query for fin info */
1232 } idn_fin_t;
1234 #define VALID_FIN(f) (((int)(f) > 0) && \
1235 ((int)(f) < (int)IDNFIN_QUERY))
1236 #define FIN_IS_FORCE(f) (((f) == IDNFIN_FORCE_SOFT) || \
1237 ((f) == IDNFIN_FORCE_HARD))
1240 * FIN ARG types - reasons a FIN was sent.
1242 typedef enum {
1243 /* 0 */ IDNFIN_ARG_NONE = 0, /* no argument */
1244 /* 1 */ IDNFIN_ARG_SMRBAD, /* SMR is corrupted */
1245 /* 2 */ IDNFIN_ARG_CPUCFG, /* missing cpu per board */
1246 /* 3 */ IDNFIN_ARG_HWERR, /* error programming hardware */
1247 /* 4 */ IDNFIN_ARG_CFGERR_FATAL, /* Fatal error during CONFIG */
1248 /* 5 */ IDNFIN_ARG_CFGERR_MTU, /* MTU sizes conflict */
1249 /* 6 */ IDNFIN_ARG_CFGERR_BUF, /* SMR_BUF_SIZE conflicts */
1250 /* 7 */ IDNFIN_ARG_CFGERR_SLAB, /* SLAB sizes conflict */
1251 /* 8 */ IDNFIN_ARG_CFGERR_NWR, /* NWR sizes conflict */
1252 /* 9 */ IDNFIN_ARG_CFGERR_NETS, /* MAX_NETS conflict */
1253 /* 10 */ IDNFIN_ARG_CFGERR_MBOX, /* MBOX_PER_NET conflict */
1254 /* 11 */ IDNFIN_ARG_CFGERR_NMCADR, /* NMCADR mismatches actual */
1255 /* 12 */ IDNFIN_ARG_CFGERR_MCADR, /* missing some MCADRs */
1256 /* 13 */ IDNFIN_ARG_CFGERR_CKSUM, /* checksum settings conflict */
1257 /* 14 */ IDNFIN_ARG_CFGERR_SMR /* SMR sizes conflict */
1258 } idn_finarg_t;
1260 #define IDNFIN_ARG_IS_FATAL(fa) ((fa) > IDNFIN_ARG_NONE)
1262 #define SET_FIN_TYPE(x, t) \
1263 ((x) &= 0xffff, (x) |= (((uint_t)(t) & 0xffff) << 16))
1264 #define SET_FIN_ARG(x, a) \
1265 ((x) &= ~0xffff, (x) |= ((uint_t)(a) & 0xffff))
1266 #define GET_FIN_TYPE(x) ((idn_fin_t)(((x) >> 16) & 0xffff))
1267 #define GET_FIN_ARG(x) ((idn_finarg_t)((x) & 0xffff))
1269 #define FINARG2IDNKERR(fa) \
1270 (((fa) == IDNFIN_ARG_SMRBAD) ? IDNKERR_SMR_CORRUPTED : \
1271 ((fa) == IDNFIN_ARG_CPUCFG) ? IDNKERR_CPU_CONFIG : \
1272 ((fa) == IDNFIN_ARG_HWERR) ? IDNKERR_HW_ERROR : \
1273 ((fa) == IDNFIN_ARG_CFGERR_FATAL) ? IDNKERR_HW_ERROR : \
1274 ((fa) == IDNFIN_ARG_CFGERR_MTU) ? IDNKERR_CONFIG_MTU : \
1275 ((fa) == IDNFIN_ARG_CFGERR_BUF) ? IDNKERR_CONFIG_BUF : \
1276 ((fa) == IDNFIN_ARG_CFGERR_SLAB) ? IDNKERR_CONFIG_SLAB : \
1277 ((fa) == IDNFIN_ARG_CFGERR_NWR) ? IDNKERR_CONFIG_NWR : \
1278 ((fa) == IDNFIN_ARG_CFGERR_NETS) ? IDNKERR_CONFIG_NETS : \
1279 ((fa) == IDNFIN_ARG_CFGERR_MBOX) ? IDNKERR_CONFIG_MBOX : \
1280 ((fa) == IDNFIN_ARG_CFGERR_NMCADR) ? IDNKERR_CONFIG_NMCADR : \
1281 ((fa) == IDNFIN_ARG_CFGERR_MCADR) ? IDNKERR_CONFIG_MCADR : \
1282 ((fa) == IDNFIN_ARG_CFGERR_CKSUM) ? IDNKERR_CONFIG_CKSUM : \
1283 ((fa) == IDNFIN_ARG_CFGERR_SMR) ? IDNKERR_CONFIG_SMR : 0)
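/*
 * Illustrative sketch (not part of the original header): the FIN type and
 * FIN argument travel packed in a single 32-bit word; fintypearg is a
 * hypothetical local and the block is never compiled.
 */
#if 0
	uint_t	fintypearg = 0;

	SET_FIN_TYPE(fintypearg, IDNFIN_FORCE_SOFT);
	SET_FIN_ARG(fintypearg, IDNFIN_ARG_CFGERR_MTU);
	ASSERT(GET_FIN_TYPE(fintypearg) == IDNFIN_FORCE_SOFT);
	ASSERT(GET_FIN_ARG(fintypearg) == IDNFIN_ARG_CFGERR_MTU);
	/* FINARG2IDNKERR() would then map the argument to IDNKERR_CONFIG_MTU */
#endif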
1286 * FIN SYNC types.
1288 #define IDNFIN_SYNC_OFF 0 /* not set */
1289 #define IDNFIN_SYNC_NO 1 /* no-sync necessary */
1290 #define IDNFIN_SYNC_YES 2 /* do fin synchronously */
1292 typedef short idn_finsync_t;
1295 * IDNP_FIN options.
1297 typedef enum {
1298 /* 0 */ IDNFIN_OPT_NONE = 0, /* none (used w/query) */
1299 /* 1 */ IDNFIN_OPT_UNLINK, /* normal unlink request */
1300 /* 2 */ IDNFIN_OPT_RELINK /* disconnect and retry link */
1301 } idn_finopt_t;
1303 #define VALID_FINOPT(f) (((f) == IDNFIN_OPT_UNLINK) || \
1304 ((f) == IDNFIN_OPT_RELINK))
1306 #define FIN_MASTER_DOMID(x) (((((x) >> 16) & 0xffff) == 0xffff) ? \
1307 IDN_NIL_DOMID : (((x) >> 16) & 0xffff))
1308 #define FIN_MASTER_CPUID(x) ((((x) & 0xffff) == 0xffff) ? \
1309 IDN_NIL_DCPU : ((x) & 0xfff))
1310 #define MAKE_FIN_MASTER(d, c) ((((uint_t)(d) & 0xffff) << 16) | \
1311 ((uint_t)(c) & 0xffff))
1312 #define NIL_FIN_MASTER MAKE_FIN_MASTER(IDN_NIL_DOMID, IDN_NIL_DCPU)
1314 #ifdef DEBUG
1315 #define IDN_FSTATE_TRANSITION(dp, ns) \
1317 int _id; \
1318 _id = (dp)->domid; \
1319 if ((dp)->dfin != (ns)) { \
1320 hrtime_t tstamp; \
1321 tstamp = TIMESTAMP(); \
1322 IDN_HISTORY_LOG(IDNH_FIN, _id, (ns), 0); \
1323 PR_STATE("FSTATE:%ld:%d: (l=%d, b/p=%d/%d) " \
1324 "%s(%d) -> %s(%d)\n", \
1325 (uint64_t)tstamp, _id, \
1326 __LINE__, \
1327 ((dp)->dcpu == IDN_NIL_DCPU) ? -1 : \
1328 CPUID_TO_BOARDID((dp)->dcpu), \
1329 (dp)->dcpu, \
1330 idnfin_str[(dp)->dfin], (dp)->dfin, \
1331 idnfin_str[ns], (ns)); \
1332 (dp)->dfin = (ns); \
1335 #else
1336 #define IDN_FSTATE_TRANSITION(dp, ns) \
1338 IDN_HISTORY_LOG(IDNH_FIN, (dp)->domid, (ns), 0); \
1339 (dp)->dfin = (ns); \
1341 #endif /* DEBUG */
1343 #endif /* !_ASM */
1344 #endif /* _KERNEL */
1346 #ifndef _ASM
1348 * IDN Per-Domain States.
1350 typedef enum {
1351 /* 0 */ IDNDS_CLOSED, /* idle */
1352 /* 1 */ IDNDS_NEGO_PEND, /* link initiating */
1353 /* 2 */ IDNDS_NEGO_SENT, /* link initiated, nego sent */
1354 /* 3 */ IDNDS_NEGO_RCVD, /* link wanted, nego+ack sent */
1355 /* 4 */ IDNDS_CONFIG, /* passing config info, prgm hw */
1356 /* 5 */ IDNDS_CON_PEND, /* connection pending */
1357 /* 6 */ IDNDS_CON_SENT, /* con sent */
1358 /* 7 */ IDNDS_CON_RCVD, /* con sent & received */
1359 /* 8 */ IDNDS_CON_READY, /* ready to establish link */
1360 /* 9 */ IDNDS_CONNECTED, /* established - linked */
1361 /* 10 */ IDNDS_FIN_PEND, /* unlink initiating */
1362 /* 11 */ IDNDS_FIN_SENT, /* unlink initiated, fin sent */
1363 /* 12 */ IDNDS_FIN_RCVD, /* unlink wanted by remote */
1364 /* 13 */ IDNDS_DMAP /* deprogramming hw */
1365 } idn_dstate_t;
1367 #define IDNDS_IS_CLOSED(dp) (((dp)->dstate == IDNDS_CLOSED) || \
1368 ((dp)->dstate == IDNDS_DMAP))
1369 #define IDNDS_IS_CONNECTING(dp) (((dp)->dstate > IDNDS_CLOSED) && \
1370 ((dp)->dstate < IDNDS_CONNECTED))
1371 #define IDNDS_IS_DISCONNECTING(dp) ((dp)->dstate > IDNDS_CONNECTED)
1372 #define IDNDS_CONFIG_DONE(dp) (((dp)->dstate == IDNDS_CLOSED) || \
1373 ((dp)->dstate > IDNDS_CONFIG))
1374 #define IDNDS_SYNC_TYPE(dp) (((dp)->dfin_sync != IDNFIN_SYNC_OFF) ? \
1375 (dp)->dfin_sync : \
1376 ((dp)->dstate < IDNDS_CON_READY) ? \
1377 IDNFIN_SYNC_NO : IDNFIN_SYNC_YES)
1379 #endif /* !_ASM */
1381 #ifdef _KERNEL
1382 #ifndef _ASM
1384 * ---------------------------------------------------------------------
1386 typedef struct idn_timer {
1387 struct idn_timer *t_forw,
1388 *t_back;
1389 struct idn_timerq *t_q;
1391 timeout_id_t t_id;
1392 short t_domid;
1393 short t_onq;
1394 ushort_t t_type;
1395 ushort_t t_subtype;
1396 uint_t t_cookie;
1397 #ifdef DEBUG
1398 hrtime_t t_posttime;
1399 hrtime_t t_exectime;
1400 #endif /* DEBUG */
1401 } idn_timer_t;
1403 #define IDN_TIMER_PUBLIC_COOKIE 0xf
1405 #define IDN_TIMERQ_IS_LOCKED(tq) (MUTEX_HELD(&(tq)->tq_mutex))
1406 #define IDN_TIMERQ_LOCK(tq) (mutex_enter(&(tq)->tq_mutex))
1407 #define IDN_TIMERQ_UNLOCK(tq) (mutex_exit(&(tq)->tq_mutex))
1409 #define IDN_TIMERQ_INIT(tq) (idn_timerq_init(tq))
1410 #define IDN_TIMERQ_DEINIT(tq) (idn_timerq_deinit(tq))
1411 #define IDN_TIMER_ALLOC() (idn_timer_alloc())
1412 #define IDN_TIMER_FREE(tp) (idn_timer_free(tp))
1414 #define IDN_TIMER_START(tq, tp, tim) \
1415 (idn_timer_start((tq), (tp), (tim)))
1416 #define IDN_TIMER_STOP(tq, typ, ck) \
1417 ((void) idn_timer_stop((tq), (typ), (ck)))
1418 #define IDN_TIMER_STOPALL(tp) \
1419 ((void) idn_timer_stopall(tp))
1420 #define IDN_TIMER_GET(tq, typ, tp, ck) \
1422 mutex_enter(&((tq)->tq_mutex)); \
1423 (tp) = idn_timer_get((tq), (typ), (ck)); \
1424 mutex_exit(&((tq)->tq_mutex)); \
1426 #define IDN_TIMER_DEQUEUE(tq, tp) \
1427 (idn_timer_dequeue((tq), (tp)))
1428 #ifdef DEBUG
1429 #define IDN_TIMER_POST(tp) \
1430 ((tp)->t_posttime = gethrtime(), (tp)->t_exectime = 0)
1431 #define IDN_TIMER_EXEC(tp) ((tp)->t_exectime = gethrtime())
1432 #else /* DEBUG */
1433 #define IDN_TIMER_POST(tp)
1434 #define IDN_TIMER_EXEC(tp)
1435 #endif /* DEBUG */
1437 #define IDN_MSGTIMER_START(domid, typ, subtyp, tim, ckp) \
1439 idn_timer_t *_tp; \
1440 char _str[15]; \
1441 ushort_t *_ckp = (ckp); \
1442 inum2str((typ), _str); \
1443 PR_TIMER("msgtimer:%d: START: type = %s (0x%x)\n", \
1444 (domid), _str, (typ)); \
1445 _tp = IDN_TIMER_ALLOC(); \
1446 _tp->t_type = (ushort_t)(typ); \
1447 _tp->t_subtype = (ushort_t)(subtyp); \
1448 _tp->t_domid = (short)(domid); \
1449 _tp->t_cookie = (_ckp) ? *(_ckp) : 0; \
1450 IDN_TIMER_POST(_tp); \
1451 if (_ckp) { \
1452 *(_ckp) = IDN_TIMER_START(&idn_domain[domid].dtimerq, \
1453 _tp, (tim)); \
1454 } else { \
1455 (void) IDN_TIMER_START(&idn_domain[domid].dtimerq, \
1456 _tp, (tim)); \
1459 #define IDN_MSGTIMER_STOP(domid, typ, ck) \
1461 char _str[15]; \
1462 inum2str((typ), _str); \
1463 PR_TIMER("msgtimer:%d: STOP: type = %s (0x%x), " \
1464 "cookie = 0x%x\n", \
1465 (domid), _str, (typ), (ck)); \
1466 IDN_TIMER_STOP(&idn_domain[domid].dtimerq, (typ), (ck)); \
1468 #define IDN_MSGTIMER_GET(dp, typ, tp, ck) \
1469 IDN_TIMER_GET(&(dp)->dtimerq, (typ), (tp), (ck))
1472 * IDN_SLABALLOC_WAITTIME
1473 * Max wait time in ticks that the local domain waits for the
1474 * master to respond to a slab allocation request. It has to
1475 * be at least as long as the wait time for a response to
1476 * the command.
1478 #define IDN_SLABALLOC_WAITTIME ((3 * idn_msg_waittime[IDNP_CMD]) / 2)
1481 * Domain state transition macros.
1483 #ifdef DEBUG
1484 #define IDN_DSTATE_TRANSITION(dp, ns) \
1486 int id; \
1487 hrtime_t tstamp; \
1488 tstamp = TIMESTAMP(); \
1489 ASSERT(RW_WRITE_HELD(&(dp)->drwlock)); \
1490 id = (dp)->domid; \
1491 IDN_HISTORY_LOG(IDNH_DSTATE, id, (ns), \
1492 (uint_t)(dp)->dcpu); \
1493 PR_STATE("DSTATE:%ld:%d: (l=%d, b/p=%d/%d) " \
1494 "%s(%d) -> %s(%d)\n", \
1495 (uint64_t)tstamp, id, \
1496 __LINE__, \
1497 ((dp)->dcpu == IDN_NIL_DCPU) ? \
1498 -1 : CPUID_TO_BOARDID((dp)->dcpu), \
1499 (dp)->dcpu, \
1500 idnds_str[(dp)->dstate], (dp)->dstate, \
1501 idnds_str[ns], (ns)); \
1502 (dp)->dstate = (ns); \
1503 IDNSB_DOMAIN_UPDATE(dp); \
1505 #else
1506 #define IDN_DSTATE_TRANSITION(dp, ns) \
1508 IDN_HISTORY_LOG(IDNH_DSTATE, (dp)->domid, \
1509 (ns), (uint_t)(dp)->dcpu); \
1510 (dp)->dstate = (ns); \
1511 IDNSB_DOMAIN_UPDATE(dp); \
1513 #endif /* DEBUG */
1515 #define IDN_XSTATE_TRANSITION(dp, xs) \
1517 int _xs = (xs); \
1518 (dp)->dxstate = _xs; \
1519 if (_xs != IDNXS_NIL) { \
1520 ASSERT((dp)->dxp); \
1521 IDN_DSTATE_TRANSITION((dp), \
1522 (dp)->dxp->xt_trans[_xs].t_state); \
1527 * ---------------------------------------------------------------------
1528 * IDN Per-Domain Data
1530 * The comment to the right of the respective field represents
1531 * what lock protects that field. If there is no comment then
1532 * no lock is required to access the field.
1533 * ---------------------------------------------------------------------
1536 #define MAXDNAME 32
1538 typedef struct idn_domain {
1539 krwlock_t drwlock;
1541 * Assigned domid for domain. Never
1542 * changes once idn_domain[] is
1543 * initialized. We are guaranteed that
1544 * all domains in IDN will have a
1545 * unique domid in the range (0-15).
1547 int domid;
1548 idn_dstate_t dstate; /* drwlock */
1549 idn_xstate_t dxstate; /* drwlock */
1551 * Gotten from uname -n for local
1552 * domain. Remote domains pass
1553 * theirs during Config phase.
1555 char dname[MAXDNAME]; /* drwlock */
1557 * IDN-wide unique identifier for the
1558 * given domain. This value will be
1559 * the same as the domid.
1561 ushort_t dnetid; /* drwlock */
1562 idn_vote_t dvote; /* drwlock */
1564 * Used during the FIN sequence to
1565 * determine what type of shutdown
1566 * (unlink) we're executing with
1567 * respect to the given domain.
1569 idn_fin_t dfin; /* drwlock */
1571 * A non-zero value for dfin_sync
1572 * indicates that unlink of respective
1573 * domain does not need to be performed
1574 * synchronously among all the IDN
1575 * member domains.
1577 short dfin_sync; /* grwlock */
1579 * Cookie used to determine the
1580 * proper context in which we're
1581 * receiving messages from the given
1582 * domain. Assigned cookies are exchanged
1583 * during initial NEGO messages.
1585 ushort_t dcookie_send; /* drwlock */
1586 ushort_t dcookie_recv; /* drwlock */
1587 short dcookie_err; /* drwlock */
1588 int dcookie_errcnt; /* drwlock */
1590 * Primary target cpu for sending
1591 * messages. Can change to help
1592 * distribute interrupts on receiving
1593 * side.
1595 int dcpu; /* drwlock */
1597 * Used to store dcpu from a previous
1598 * life. Only used when requesting
1599 * a RELINK with a domain we were just
1600 * previously linked with. Thus, it
1601 * does represent a valid cpu in the
1602 * remote domain.
1604 int dcpu_save; /* drwlock */
1606 * Used to store the cpu from which
1607 * the last message was received.
1609 int dcpu_last;
1611 * Transition phase area. This field
1612 * points to the proper phase structure
1613 * depending on what stage the given
1614 * domain is in.
1616 idn_xphase_t *dxp; /* drwlock */
1618 * Actual synchronization object for
1619 * the given domain.
1621 idn_syncop_t dsync; /* drwlock & idn.sync.sz_mutex */
1623 * Slab information for given domain.
1624 * If the local domain is a master,
1625 * then this field in each domain is used
1626 * to store which slabs have been assigned
1627 * to given domain. If the local domain
1628 * is a slave, then this information is
1629 * NULL for all remote idn_domain[]
1630 * entries, but for local domain holds
1631 * those slabs assigned to local domain.
1633 smr_slab_t *dslab; /* dslab_rwlock */
1634 short dnslabs; /* dslab_rwlock */
1635 short dslab_state; /* dslab_rwlock */
1636 krwlock_t dslab_rwlock;
1638 * Set of cpus belonging to domain.
1640 cpuset_t dcpuset; /* drwlock */
1642 int dncpus; /* drwlock */
1644 * Index into dcpumap to determine
1645 * which cpu to target next for
1646 * interrupt. Intended to allow fair
1647 * distribution of interrupts on
1648 * remote domain.
1650 uint_t dcpuindex; /* drwlock */
1652 * Quick look-up map of cpus belonging
1653 * to domain. Used to select next target.
1655 uchar_t *dcpumap; /* drwlock */
1657 * Non-zero indicates outstanding
1658 * I/O's to given domain.
1660 int dio; /* drwlock */
1661 int dioerr; /* drwlock */
1663 * Set when we fail to allocate a buffer
1664 * for a domain. Dictates whether to
1665 * reclaim max buffers or not.
1667 lock_t diowanted;
1669 * Set when remote domain does not
1670 * seem to be picking up messages sent
1671 * to it. Non-zero indicates we have
1672 * an outstanding "ping" to domain.
1674 lock_t diocheck; /* drwlock */
1675 short dslabsize; /* drwlock */
1676 uint_t dmtu; /* drwlock */
1678 uint_t dbufsize; /* drwlock */
1679 short dnwrsize; /* drwlock */
1680 lock_t dreclaim_inprogress; /* drwlock */
1681 uchar_t dreclaim_index; /* drwlock */
1683 * The following field is primarily
1684 * used during CFG exchange to keep
1685 * track of certain per-domain information.
1687 union { /* all - drwlock */
1688 struct {
1689 uint_t _dcfgphase : 6;
1690 uint_t _dcfgsnddone : 1;
1691 uint_t _dcfgrcvdone : 1;
1692 uint_t _dcksum : 2;
1693 uint_t _dmaxnets : 6;
1694 uint_t _dmboxpernet : 9;
1695 uint_t _dncfgitems : 6;
1696 uint_t _drecfg : 1;
1697 } _s;
1698 int _dtmp;
1699 } _u;
1701 * Each domain entry maintains a
1702 * timer queue holding timers for
1703 * messages outstanding to that domain.
1705 struct idn_timerq {
1706 int tq_cookie; /* tq_mutex */
1707 kmutex_t tq_mutex;
1708 int tq_count; /* tq_mutex */
1709 idn_timer_t *tq_queue; /* tq_mutex */
1710 } dtimerq;
1712 * dawol is used to keep
1713 * track of AWOL details for
1714 * given domain when it is
1715 * non-responsive.
1717 struct {
1718 int a_count; /* drwlock */
1719 clock_t a_time; /* drwlock */
1720 clock_t a_last; /* drwlock */
1721 clock_t a_msg; /* drwlock */
1722 } dawol;
1724 struct hwconfig {
1725 short dh_nboards; /* drwlock */
1726 short dh_nmcadr; /* drwlock */
1727 boardset_t dh_boardset; /* drwlock */
1728 uint_t dh_mcadr[MAX_BOARDS]; /* drwlock */
1729 } dhw;
1731 * Mailbox information used to
1732 * send/recv messages to given domain.
1734 struct {
1735 kmutex_t m_mutex;
1736 struct idn_mboxtbl *m_tbl; /* m_mutex */
1737 struct idn_mainmbox *m_send; /* m_mutex */
1738 struct idn_mainmbox *m_recv; /* m_mutex */
1739 } dmbox;
1740 } idn_domain_t;
1742 typedef struct idn_timerq idn_timerq_t;
1744 #define dcfgphase _u._s._dcfgphase
1745 #define dcfgsnddone _u._s._dcfgsnddone
1746 #define dcfgrcvdone _u._s._dcfgrcvdone
1747 #define dcksum _u._s._dcksum
1748 #define dmaxnets _u._s._dmaxnets
1749 #define dmboxpernet _u._s._dmboxpernet
1750 #define dncfgitems _u._s._dncfgitems
1751 #define drecfg _u._s._drecfg
1752 #define dbindport _u._dbindport
1753 #define dconnected _u._dconnected
1754 #define dtmp _u._dtmp
1756 #define IDN_DLOCK_EXCL(dd) (rw_enter(&idn_domain[dd].drwlock, RW_WRITER))
1757 #define IDN_DLOCK_SHARED(dd) (rw_enter(&idn_domain[dd].drwlock, RW_READER))
1758 #define IDN_DLOCK_TRY_SHARED(dd) \
1759 (rw_tryenter(&idn_domain[dd].drwlock, \
1760 RW_READER))
1761 #define IDN_DLOCK_DOWNGRADE(dd) (rw_downgrade(&idn_domain[dd].drwlock))
1762 #define IDN_DUNLOCK(dd) (rw_exit(&idn_domain[dd].drwlock))
1763 #define IDN_DLOCK_IS_EXCL(dd) (RW_WRITE_HELD(&idn_domain[dd].drwlock))
1764 #define IDN_DLOCK_IS_SHARED(dd) (RW_READ_HELD(&idn_domain[dd].drwlock))
1765 #define IDN_DLOCK_IS_HELD(dd) (RW_LOCK_HELD(&idn_domain[dd].drwlock))
1767 #define IDN_MBOX_LOCK(dd) (mutex_enter(&idn_domain[dd].dmbox.m_mutex))
1768 #define IDN_MBOX_UNLOCK(dd) (mutex_exit(&idn_domain[dd].dmbox.m_mutex))
1770 #define IDN_RESET_COOKIES(dd) \
1771 (idn_domain[dd].dcookie_send = idn_domain[dd].dcookie_recv = 0)
1773 #define DSLAB_STATE_UNKNOWN 0
1774 #define DSLAB_STATE_LOCAL 1
1775 #define DSLAB_STATE_REMOTE 2
1777 #define DSLAB_READ_HELD(d) RW_READ_HELD(&idn_domain[d].dslab_rwlock)
1778 #define DSLAB_WRITE_HELD(d) RW_WRITE_HELD(&idn_domain[d].dslab_rwlock)
1780 #define DSLAB_LOCK_EXCL(d) \
1781 rw_enter(&idn_domain[d].dslab_rwlock, RW_WRITER)
1782 #define DSLAB_LOCK_SHARED(d) \
1783 rw_enter(&idn_domain[d].dslab_rwlock, RW_READER)
1784 #define DSLAB_LOCK_TRYUPGRADE(d) \
1785 rw_tryupgrade(&idn_domain[d].dslab_rwlock)
1786 #define DSLAB_UNLOCK(d) rw_exit(&idn_domain[d].dslab_rwlock)
1789 * ---------------------------------------------------------------------
1790 * Macro to pick another target cpu for the given domain. This hopefully
1791 * improves performance by better distributing the SSI responsibilities
1792 * at the target domain.
1793 * ---------------------------------------------------------------------
1795 #define BUMP_INDEX(set, index) \
1797 register int p; \
1798 for (p = (index)+1; p < NCPU; p++) \
1799 if (CPU_IN_SET((set), p)) \
1800 break; \
1801 if (p >= NCPU) \
1802 for (p = 0; p <= (index); p++) \
1803 if (CPU_IN_SET((set), p)) \
1804 break; \
1805 if (!CPU_IN_SET((set), p)) { \
1806 uint_t _u32, _l32; \
1807 _u32 = UPPER32_CPUMASK(set); \
1808 _l32 = LOWER32_CPUMASK(set); \
1809 cmn_err(CE_PANIC, \
1810 "IDN: cpu %d not in cpuset 0x%x.%0x\n", \
1811 p, _u32, _l32); \
1813 (index) = p; \
1816 #define IDN_ASSIGN_DCPU(dp, cookie) \
1817 ((dp)->dcpu = (int)((dp)->dcpumap[(cookie) & (NCPU-1)]))
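/*
 * Illustrative sketch (not part of the driver); `cset' and `cur' are
 * placeholder locals.  BUMP_INDEX() advances a rotating index to the
 * next cpu present in the given cpuset, wrapping back to the lowest
 * member after the highest, and panics if the set is empty:
 *
 *	cpuset_t	cset;		target domain's cpus
 *	int		cur = 0;
 *
 *	BUMP_INDEX(cset, cur);		cur -> next cpu in cset
 */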
1820 * ---------------------------------------------------------------------
1821 * Atomic increment/decrement, swap, compare-swap functions.
1822 * ---------------------------------------------------------------------
1824 #define ATOMIC_INC(v) atomic_inc_32((uint_t *)&(v))
1825 #define ATOMIC_DEC(v) atomic_dec_32((uint_t *)&(v))
1826 #define ATOMIC_SUB(v, n) atomic_add_32((uint_t *)&(v), -(n))
1827 #define ATOMIC_CAS(a, c, n) atomic_cas_32((uint32_t *)(a), (uint32_t)(c), \
1828 (uint32_t)(n))
1829 #define ATOMIC_SWAPL(a, v) atomic_swap_32((uint32_t *)(a), (uint32_t)(v))
1832 * DMV vector interrupt support.
1834 * A fixed-size circular buffer is maintained as a queue of
1835 * incoming interrupts. The low-level idn_dmv_handler() waits
1836 * for an entry to become FREE and will atomically mark it INUSE.
1837 * Once it has filled in the appropriate fields it will be marked
1838 * as READY. The high-level idn_handler() will be invoked and will
1839 * process all messages in the queue that are READY. Each message
1840 * is marked PROCESS, a protojob is created and filled in, and
1841 * then the interrupt message is marked FREE for use in the next
1842 * interrupt. The iv_state field is used to hold the relevant
1843 * state and is updated atomically.
1845 #define IDN_PIL PIL_8
1846 #define IDN_DMV_PENDING_MAX 128 /* per cpu */
1848 #endif /* !_ASM */
1850 #ifndef _ASM
1853 * The size of this structure must be a power of 2
1854 * so that we can do a simple shift to calculate
1855 * our offset into it based on cpuid.
1857 typedef struct idn_dmv_cpu {
1858 uint32_t idn_dmv_current;
1859 int32_t idn_dmv_lostintr;
1860 lock_t idn_dmv_active;
1861 char _padding[(2 * sizeof (uint64_t)) - \
1862 sizeof (uint32_t) - \
1863 sizeof (lock_t) - \
1864 sizeof (int32_t)];
1865 } idn_dmv_cpu_t;
1867 typedef struct idn_dmv_data {
1868 uint64_t idn_soft_inum;
1869 uint64_t idn_dmv_qbase;
1870 idn_dmv_cpu_t idn_dmv_cpu[NCPU];
1871 } idn_dmv_data_t;
1874 * Requirements of the following data structure:
1875 * - MUST be double-word (8 bytes) aligned.
1876 * - _iv_head field MUST start on double-word boundary.
1877 * - iv_xargs0 MUST start on double-word boundary
1878 * with iv_xargs1 immediately following.
1879 * - iv_xargs2 MUST start on double-word boundary
1880 * with iv_xargs3 immediately following.
1882 typedef struct idn_dmv_msg {
1883 uint32_t iv_next; /* offset */
1884 uchar_t iv_inuse;
1885 uchar_t iv_ready;
1886 ushort_t _padding;
1887 uint32_t iv_head : 16;
1888 uint32_t iv_cookie : 16;
1889 uint32_t iv_ver : 8;
1890 uint32_t iv_mtype : 6;
1891 uint32_t iv_atype : 6;
1892 uint32_t iv_domid : 4;
1893 uint32_t iv_cpuid : 8;
1894 uint32_t iv_xargs0;
1895 uint32_t iv_xargs1;
1896 uint32_t iv_xargs2;
1897 uint32_t iv_xargs3;
1898 } idn_dmv_msg_t;
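/*
 * Illustrative sketch (not part of the driver) of the receive side
 * of the queue protocol described above; `msp' is a placeholder
 * pointer to an idn_dmv_msg_t entry.  The low-level handler claims a
 * FREE entry by setting iv_inuse, fills in the fields and then sets
 * iv_ready; the soft-interrupt side consumes READY entries and then
 * releases them:
 *
 *	if (msp->iv_ready) {
 *		... copy iv_mtype, iv_domid, iv_xargs0..3 into a
 *		    protojob ...
 *		msp->iv_ready = 0;
 *		msp->iv_inuse = 0;
 *	}
 */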
1900 extern uint_t idn_dmv_inum;
1901 extern uint_t idn_soft_inum;
1904 * An IDN-network address has the following format:
1906 * 31......16,15........0
1907 * | channel | dnetid |
1908 * ----------------------
1909 * channel - network interface.
1910 * netid - idn_domain[].dnetid
1912 #define IDN_MAXMAX_NETS 32
1913 #define IDN_BROADCAST_ALLCHAN ((ushort_t)-1)
1914 #define IDN_BROADCAST_ALLNETID ((ushort_t)-1)
1916 typedef union {
1917 struct {
1918 ushort_t chan;
1919 ushort_t netid;
1920 } net;
1921 uint_t netaddr;
1922 } idn_netaddr_t;
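/*
 * Illustrative sketch (not part of the driver); `dst', `channel' and
 * `domid' are placeholder locals.  The union above packs/unpacks an
 * IDN-network address:
 *
 *	idn_netaddr_t	dst;
 *
 *	dst.net.chan = channel;
 *	dst.net.netid = idn_domain[domid].dnetid;
 *	... dst.netaddr now holds the packed 32-bit address ...
 *
 *	channel = dst.net.chan;
 *	domid = IDN_NETID2DOMID(dst.net.netid);
 */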
1924 #define CHANSET_ALL (~((idn_chanset_t)0))
1925 #define CHANSET(c) \
1926 ((idn_chanset_t)1 << ((c) & 31))
1927 #define CHAN_IN_SET(m, c) \
1928 (((m) & ((idn_chanset_t)1 << ((c) & 31))) != 0)
1929 #define CHANSET_ADD(m, c) \
1930 ((m) |= ((idn_chanset_t)1 << ((c) & 31)))
1931 #define CHANSET_DEL(m, c) \
1932 ((m) &= ~((idn_chanset_t)1 << ((c) & 31)))
1933 #define CHANSET_ZERO(m) ((m) = 0)
1935 typedef enum {
1936 /* 0 */ IDNCHAN_OPEN,
1937 /* 1 */ IDNCHAN_SOFT_CLOSE,
1938 /* 2 */ IDNCHAN_HARD_CLOSE,
1939 /* 3 */ IDNCHAN_OFFLINE,
1940 /* 4 */ IDNCHAN_ONLINE
1941 } idn_chanop_t;
1944 * Retry support.
1946 #define IDN_RETRY_TOKEN(d, x) ((((d) & 0xf) << 16) | \
1947 (0xffff & (uint_t)(x)))
1948 #define IDN_RETRY_TOKEN2DOMID(t) ((int)(((t) >> 16) & 0xf))
1949 #define IDN_RETRY_TOKEN2TYPE(t) ((idn_retry_t)((t) & 0xffff))
1950 #define IDN_RETRY_TYPEALL ((idn_retry_t)0xffff)
1951 #define IDN_RETRY_INTERVAL hz /* 1 sec */
1952 #define IDN_RETRY_RECFG_MULTIPLE 10
1954 #define IDN_RETRYINTERVAL_NEGO (2 * hz)
1955 #define IDN_RETRYINTERVAL_CON (2 * hz)
1956 #define IDN_RETRYINTERVAL_FIN (2 * hz)
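/*
 * Illustrative sketch (not part of the driver); `domid' and `token'
 * are placeholder locals.  A retry token packs the domain id into
 * the upper bits and the retry type into the low 16 bits, so either
 * half can be recovered later:
 *
 *	uint_t	token;
 *
 *	token = IDN_RETRY_TOKEN(domid, IDNRETRY_NEGO);
 *	ASSERT(IDN_RETRY_TOKEN2DOMID(token) == domid);
 *	ASSERT(IDN_RETRY_TOKEN2TYPE(token) == IDNRETRY_NEGO);
 */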
1958 typedef struct idn_retry_job {
1959 struct idn_retry_job *rj_prev;
1960 struct idn_retry_job *rj_next;
1961 void (*rj_func)(uint_t token, void *arg);
1962 void *rj_arg;
1963 uint_t rj_token;
1964 short rj_onq;
1965 timeout_id_t rj_id;
1966 } idn_retry_job_t;
1968 #define IDNRETRY_ALLOCJOB() \
1969 ((idn_retry_job_t *)kmem_cache_alloc(idn.retryqueue.rq_cache, KM_SLEEP))
1970 #define IDNRETRY_FREEJOB(j) \
1971 (kmem_cache_free(idn.retryqueue.rq_cache, (void *)(j)))
1973 typedef enum {
1974 /* 0 */ IDNRETRY_NIL = 0,
1975 /* 1 */ IDNRETRY_NEGO,
1976 /* 2 */ IDNRETRY_CON,
1977 /* 3 */ IDNRETRY_CONQ, /* for CON queries */
1978 /* 4 */ IDNRETRY_FIN,
1979 /* 5 */ IDNRETRY_FINQ, /* for FIN queries */
1980 /* 6 */ IDN_NUM_RETRYTYPES
1981 } idn_retry_t;
1984 * ---------------------------------------------------------------------
1986 typedef struct {
1987 int m_domid;
1988 int m_cpuid;
1989 ushort_t m_msgtype;
1990 ushort_t m_acktype;
1991 ushort_t m_cookie;
1992 idn_xdcargs_t m_xargs;
1993 } idn_protomsg_t;
1995 typedef struct idn_protojob {
1996 struct idn_protojob *j_next;
1997 int j_cache;
1998 idn_protomsg_t j_msg;
1999 } idn_protojob_t;
2001 typedef struct idn_protoqueue {
2002 struct idn_protoqueue *q_next;
2003 idn_protojob_t *q_joblist;
2004 idn_protojob_t *q_joblist_tail;
2005 int q_die;
2006 int q_id;
2007 ksema_t *q_morgue;
2008 kthread_id_t q_threadp;
2009 kcondvar_t q_cv;
2010 kmutex_t q_mutex;
2011 } idn_protoqueue_t;
2013 #define IDN_PROTOCOL_NSERVERS 4
2014 #define IDN_PROTOCOL_SERVER_HASH(d) ((d) % idn.nservers)
2015 #define IDN_PROTOJOB_CHUNKS (idn.nservers)
2018 * ---------------------------------------------------------------------
2019 * Data Server definitions.
2021 * idn_datasvr_t - Describes data server thread.
2022 * . ds_id - Per-domain identifier for data server.
2023 * . ds_domid - Domain which data server is handling.
2024 * . ds_state - Flag to enable/disable/terminate
2025 * data server.
2026 * . ds_mboxp - Pointer to data server's (local)
2027 * mailbox to be serviced.
2028 * . ds_waittime - cv_timedwait sleep time before
2029 * checking respective mailbox.
2030 * . ds_threadp - Pointer to data server thread.
2031 * . ds_cv - Condvar for sleeping.
2032 * . ds_morguep - Semaphore for terminating thread.
2034 * idn_mboxhdr_t - Resides in SMR space (MUST be cache_linesize).
2035 * . mh_svr_active - Non-zero indicates data server is
2036 * actively reading mailbox for messages.
2037 * . mh_svr_ready - Non-zero indicates data server has
2038 * allocated and is ready to accept data.
2039 * . mh_cookie - Identifier primarily for debug purposes.
2041 * idn_mboxmsg_t - Entry in the SMR space circular queue used to
2042 * represent a data packet.
2043 * . mm_owner - Non-zero indicates entry is available
2044 * to be processed by receiver's data server.
2045 * . mm_flag - Indicates whether entry needs to be
2046 * reclaimed by the sender. Also holds error
2047 * indications (e.g. bad offset).
2048 * . mm_offset - SMR offset of respective data packet.
2050 * idn_mboxtbl_t - Encapsulation of a per-domain mailbox (SMR space).
2051 * . mt_header - Header information for synchronization.
2052 * . mt_queue - Circular queue of idn_mboxmsg_t entries.
2054 * idn_mainmbox_t - Encapsulation of main SMR recv/send mailboxes.
2055 * . mm_mutex - Protects mm_* entries, enqueuing, and
2056 * dequeuing of messages. Also protects
2057 * updates to the route table pointed to
2058 * by mm_routetbl.
2059 * . mm_count - send: Current number of messages
2060 * enqueued.
2061 * - recv: Cumulative number of messages
2062 * processed.
2063 * . mm_max_count - send: Maximum number of messages
2064 * enqueued per iteration.
2065 * recv: Maximum number of messages
2066 * dequeued per iteration.
2067 * . mm_smr_mboxp - Pointer to SMR (vaddr) space where
2068 * respective mailbox resides.
2069 * ---------------------------------------------------------------------
2071 #define IDN_MBOXHDR_COOKIE_TOP ((uint_t)0xc0c0)
2072 #define IDN_MAKE_MBOXHDR_COOKIE(pd, sd, ch) \
2073 ((IDN_MBOXHDR_COOKIE_TOP << 16) \
2074 | (((uint_t)(pd) & 0xf) << 12) \
2075 | (((uint_t)(sd) & 0xf) << 8) \
2076 | ((uint_t)(ch) & 0xf))
2077 #define IDN_GET_MBOXHDR_COOKIE(mhp) \
2078 ((mhp)->mh_cookie & ~0xff00)
2079 #define VALID_MBOXHDR(mhp, ch, cksum) \
2080 ((IDN_GET_MBOXHDR_COOKIE(mhp) == \
2081 IDN_MAKE_MBOXHDR_COOKIE(0, 0, (ch))) && \
2082 ((cksum) == (*(mhp)).mh_cksum))
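/*
 * Illustrative sketch (not part of the driver); `mhp', `pd', `sd'
 * and `channel' are placeholder locals.  The master stamps a mailbox
 * header with a cookie derived from the primary/secondary domain ids
 * and the channel; a consumer later validates the header (domain
 * bits masked off) together with its checksum (IDN_CKSUM_MBOX() is
 * defined further below):
 *
 *	mhp->mh_cookie = IDN_MAKE_MBOXHDR_COOKIE(pd, sd, channel);
 *	...
 *	if (!VALID_MBOXHDR(mhp, channel, IDN_CKSUM_MBOX(mhp)))
 *		... treat the mailbox header as corrupted ...
 */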
2084 * The number of entries in a mailbox queue must be chosen so
2085 * that (IDN_MMBOX_NUMENTRIES * sizeof (idn_mboxmsg_t)) is a multiple
2086 * of a cacheline size (64).
2088 #define IDN_MMBOX_NUMENTRIES IDN_MBOX_PER_NET
2090 * We step through the mailboxes in effectively cacheline size
2091 * increments so that the source and receiving cpus are not competing
2092 * for the same cacheline when transmitting/receiving messages into/from
2093 * the mailboxes. The hard requirement is that the step value be even
2094 * since the mailbox size will be chosen odd. This allows us to wraparound
2095 * the mailbox uniquely touching each entry until we've exhausted them
2096 * all at which point we'll end up where we initially started and repeat
2097 * again.
2099 #define IDN_MMBOXINDEX_STEP (((64 / sizeof (idn_mboxmsg_t)) + 1) & 0xfffe)
2100 #define IDN_MMBOXINDEX_INC(i) \
2102 if (((i) += IDN_MMBOXINDEX_STEP) >= IDN_MMBOX_NUMENTRIES) \
2103 (i) -= IDN_MMBOX_NUMENTRIES; \
2106 #define IDN_MMBOXINDEX_DIFF(i, j) \
2107 (((i) >= (j)) ? (((i) - (j)) / IDN_MMBOXINDEX_STEP) \
2108 : ((((i) + IDN_MMBOX_NUMENTRIES) - (j)) / IDN_MMBOXINDEX_STEP))
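/*
 * Illustrative sketch (not part of the driver); `mmp', `mtp' and
 * `qi' are placeholder locals.  A data server walks its mailbox
 * queue using the step macro so consecutive references land in
 * different cachelines; the index wraps and still visits every
 * entry once per pass:
 *
 *	int	qi = mmp->mm_qiget;
 *
 *	while (mtp->mt_queue[qi].ms_owner) {
 *		... process the entry ...
 *		IDN_MMBOXINDEX_INC(qi);
 *	}
 *	mmp->mm_qiget = qi;
 */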
2111 * Require IDN_MBOXAREA_SIZE <= IDN_SLAB_SIZE so we don't waste
2112 * slab space.
2114 * Each domain maintains a MAX_DOMAIN(16) entry mbox_table. Each
2115 * entry represents a receive mailbox for a possible domain to which
2116 * the given domain may have a connection. The send mailbox for each
2117 * respective domain is given to the local domain at the time of
2118 * connection establishment.
2122 * ---------------------------------------------------------------------
2124 #define IDN_MBOXTBL_SIZE \
2125 (IDNROUNDUP(((IDN_MBOX_PER_NET * sizeof (idn_mboxmsg_t)) \
2126 + sizeof (idn_mboxhdr_t)), IDN_ALIGNSIZE))
2129 * ---------------------------------------------------------------------
2130 * Each domain has idn_max_nets worth of possible mailbox tables
2131 * for each domain to which it might possibly be connected.
2132 * ---------------------------------------------------------------------
2134 #define IDN_MBOXAREA_SIZE \
2135 (IDN_MBOXTBL_SIZE * IDN_MAX_NETS * MAX_DOMAINS * MAX_DOMAINS)
2136 #define IDN_MBOXAREA_OFFSET(d) \
2137 ((d) * IDN_MBOXTBL_SIZE * IDN_MAX_NETS * MAX_DOMAINS)
2140 * ---------------------------------------------------------------------
2141 * Return the base of the mailbox area (set of tables) assigned
2142 * to the given domain id.
2143 * ---------------------------------------------------------------------
2145 #define IDN_MBOXAREA_BASE(m, d) \
2146 ((idn_mboxtbl_t *)(((ulong_t)(m)) + IDN_MBOXAREA_OFFSET(d)))
2149 * ---------------------------------------------------------------------
2150 * Return the pointer to the respective receive mailbox (table set)
2151 * for the given domain id relative to the given base mailbox table.
2152 * ---------------------------------------------------------------------
2154 #define IDN_MBOXTBL_PTR(t, d) \
2155 ((idn_mboxtbl_t *)(((ulong_t)(t)) + ((d) * IDN_MBOXTBL_SIZE \
2156 * IDN_MAX_NETS)))
2158 * ---------------------------------------------------------------------
2159 * Return the pointer to the actual target mailbox based on the
2160 * given channel in the given mailbox table.
2161 * ---------------------------------------------------------------------
2163 #define IDN_MBOXTBL_PTR_CHAN(t, c) \
2164 ((idn_mboxtbl_t *)(((ulong_t)(t)) + ((c) * IDN_MBOXTBL_SIZE)))
2166 #define IDN_MBOXTBL_PTR_INC(t) \
2167 ((t) = (idn_mboxtbl_t *)(((ulong_t)(t)) + IDN_MBOXTBL_SIZE))
2169 #define IDN_MBOXCHAN_INC(i) \
2171 if (++(i) == IDN_MAX_NETS) \
2172 (i) = 0; \
2176 * ---------------------------------------------------------------------
2177 * Return the absolute location within the entire mailbox area
2178 * of the mboxtbl for the given primary and secondary domain and
2179 * channel. Only relevant when done by the master.
2180 * ---------------------------------------------------------------------
2182 #define IDN_MBOXTBL_ABS_PTR(mt, pd, sd, ch) \
2183 (IDN_MBOXTBL_PTR_CHAN( \
2184 IDN_MBOXTBL_PTR( \
2185 IDN_MBOXAREA_BASE((mt), (pd)), \
2186 (sd)), \
2187 (ch)))
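/*
 * Illustrative sketch (not part of the driver); `pd', `sd' and
 * `channel' are placeholder locals.  The master resolves the mailbox
 * table for a primary/secondary domain pair and channel within the
 * SMR mailbox area:
 *
 *	idn_mboxtbl_t	*mtp;
 *
 *	mtp = IDN_MBOXTBL_ABS_PTR(idn.mboxarea, pd, sd, channel);
 */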
2189 #define IDN_BFRAME_SHIFT idn.bframe_shift
2190 #define IDN_BFRAME2OFFSET(bf) ((bf) << IDN_BFRAME_SHIFT)
2191 #define IDN_BFRAME2ADDR(bf) IDN_OFFSET2ADDR(IDN_BFRAME2OFFSET(bf))
2192 #define IDN_OFFSET2BFRAME(off) (((off) >> IDN_BFRAME_SHIFT) & 0xffffff)
2193 #define IDN_ADDR2BFRAME(addr) IDN_OFFSET2BFRAME(IDN_ADDR2OFFSET(addr))
2195 typedef struct idn_mboxmsg {
2196 uint_t ms_owner : 1,
2197 ms_flag : 7,
2198 ms_bframe : 24;
2199 } idn_mboxmsg_t;
2201 typedef idn_mboxmsg_t idn_mboxq_t[1];
2203 #define IDN_CKSUM_MBOX_COUNT (offsetof(idn_mboxhdr_t, mh_svr_ready) / 2)
2205 #define IDN_CKSUM_MBOX(h) \
2206 (IDN_CHECKSUM ? \
2207 idn_cksum((ushort_t *)(h), IDN_CKSUM_MBOX_COUNT) : 0)
2209 typedef struct idn_mboxhdr {
2210 uint_t mh_cookie;
2211 uint_t mh_svr_ready_ptr;
2212 uint_t mh_svr_active_ptr;
2213 ushort_t mh_svr_ready;
2214 ushort_t mh_svr_active;
2216 uint_t _padding[(64 -
2217 (4*sizeof (uint_t)) -
2218 (2*sizeof (ushort_t))) / sizeof (uint_t)];
2220 uint_t mh_cksum;
2221 } idn_mboxhdr_t;
2223 typedef struct idn_mboxtbl {
2224 idn_mboxhdr_t mt_header;
2225 idn_mboxq_t mt_queue;
2226 } idn_mboxtbl_t;
2228 #define IDN_CHAN_DOMAIN_REGISTER(csp, dom) \
2229 (DOMAINSET_ADD((csp)->ch_reg_domset, (dom)))
2231 #define IDN_CHAN_DOMAIN_UNREGISTER(csp, dom) \
2232 (DOMAINSET_DEL((csp)->ch_reg_domset, (dom)))
2234 #define IDN_CHAN_DOMAIN_IS_REGISTERED(csp, dom) \
2235 (DOMAIN_IN_SET((csp)->ch_reg_domset, (dom)))
2237 #define IDN_CHANSVR_SCANSET_ADD_PENDING(csp, dom) \
2239 register int _d; \
2240 register uint64_t _domset; \
2241 (dom) &= MAX_DOMAINS - 1; /* Assumes power of 2 */ \
2242 _domset = 0ull; \
2243 for (_d = 0; _d < (csp)->ch_recv_domcount; _d++) { \
2244 if ((int)(((csp)->ch_recv_scanset_pending >> \
2245 (_d * 4)) & 0xf) == (dom)) \
2246 break; \
2247 else \
2248 _domset = (_domset << 4) | 0xfull; \
2250 if (_d == (csp)->ch_recv_domcount) { \
2251 _domset &= (csp)->ch_recv_scanset_pending; \
2252 _domset |= (uint64_t)(dom) << \
2253 ((csp)->ch_recv_domcount * 4); \
2254 (csp)->ch_recv_domcount++; \
2255 (csp)->ch_recv_scanset_pending = 0ull; \
2256 for (_d = 0; _d < 16; \
2257 _d += (csp)->ch_recv_domcount) { \
2258 (csp)->ch_recv_scanset_pending |= _domset; \
2259 _domset <<= (csp)->ch_recv_domcount * 4; \
2263 #define IDN_CHANSVR_SCANSET_DEL_PENDING(csp, dom) \
2265 register int _d; \
2266 register uint64_t _domset; \
2267 (dom) &= MAX_DOMAINS - 1; /* Assumes power of 2 */ \
2268 _domset = 0ull; \
2269 for (_d = 0; _d < (csp)->ch_recv_domcount; _d++) { \
2270 if ((int)(((csp)->ch_recv_scanset_pending >> \
2271 (_d * 4)) & 0xf) == (dom)) \
2272 break; \
2273 else \
2274 _domset = (_domset << 4) | 0xfull; \
2276 if (_d < (csp)->ch_recv_domcount) { \
2277 _domset &= (csp)->ch_recv_scanset_pending; \
2278 (csp)->ch_recv_scanset_pending >>= 4; \
2279 (csp)->ch_recv_domcount--; \
2280 for (; _d < (csp)->ch_recv_domcount; _d++) \
2281 _domset |= (csp)->ch_recv_scanset_pending &\
2282 (0xfull << (_d * 4)); \
2283 (csp)->ch_recv_scanset_pending = 0ull; \
2284 if ((csp)->ch_recv_domcount) { \
2285 for (_d = 0; _d < 16; \
2286 _d += (csp)->ch_recv_domcount) { \
2287 (csp)->ch_recv_scanset_pending |= \
2288 _domset; \
2289 _domset <<= \
2290 (csp)->ch_recv_domcount * 4; \
2296 #define IDN_CHAN_TRYLOCK_GLOBAL(csp) \
2297 mutex_tryenter(&(csp)->ch_mutex)
2298 #define IDN_CHAN_LOCK_GLOBAL(csp) \
2299 mutex_enter(&(csp)->ch_mutex)
2300 #define IDN_CHAN_UNLOCK_GLOBAL(csp) \
2301 mutex_exit(&(csp)->ch_mutex)
2302 #define IDN_CHAN_GLOBAL_IS_LOCKED(csp) \
2303 (MUTEX_HELD(&(csp)->ch_mutex))
2305 #define IDN_CHAN_LOCAL_IS_LOCKED(csp) \
2306 (MUTEX_HELD(&(csp)->ch_send.c_mutex) && \
2307 MUTEX_HELD(&(csp)->ch_recv.c_mutex))
2308 #define IDN_CHAN_LOCK_LOCAL(csp) \
2309 (mutex_enter(&(csp)->ch_recv.c_mutex), \
2310 mutex_enter(&(csp)->ch_send.c_mutex))
2311 #define IDN_CHAN_UNLOCK_LOCAL(csp) \
2312 (mutex_exit(&(csp)->ch_send.c_mutex), \
2313 mutex_exit(&(csp)->ch_recv.c_mutex))
2315 #define IDN_CHAN_RECV_IS_LOCKED(csp) \
2316 (MUTEX_HELD(&(csp)->ch_recv.c_mutex))
2317 #define IDN_CHAN_TRYLOCK_RECV(csp) \
2318 (mutex_tryenter(&(csp)->ch_recv.c_mutex))
2319 #define IDN_CHAN_LOCK_RECV(csp) \
2320 (mutex_enter(&(csp)->ch_recv.c_mutex))
2321 #define IDN_CHAN_UNLOCK_RECV(csp) \
2322 (mutex_exit(&(csp)->ch_recv.c_mutex))
2324 #define IDN_CHAN_SEND_IS_LOCKED(csp) \
2325 (MUTEX_HELD(&(csp)->ch_send.c_mutex))
2326 #define IDN_CHAN_TRYLOCK_SEND(csp) \
2327 (mutex_tryenter(&(csp)->ch_send.c_mutex))
2328 #define IDN_CHAN_LOCK_SEND(csp) \
2329 (mutex_enter(&(csp)->ch_send.c_mutex))
2330 #define IDN_CHAN_UNLOCK_SEND(csp) \
2331 (mutex_exit(&(csp)->ch_send.c_mutex))
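/*
 * Illustrative sketch (not part of the driver); `csp' is a
 * placeholder channel server pointer.  When both the global and
 * local channel states are updated, the locks are taken in the
 * order described below (global before local):
 *
 *	IDN_CHAN_LOCK_GLOBAL(csp);
 *	IDN_CHAN_LOCK_LOCAL(csp);
 *	... update ch_state and the send/recv c_state ...
 *	IDN_CHAN_UNLOCK_LOCAL(csp);
 *	IDN_CHAN_UNLOCK_GLOBAL(csp);
 */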
2334 * A channel table is an array of pointers to mailboxes
2335 * for the respective domains for the given channel.
2336 * Used as a cache for frequently used items. Respective
2337 * fields in mainmbox are updated just prior to sleeping.
2341 * Reading c_state requires either c_mutex or ch_mutex.
2342 * Writing c_state requires both c_mutex and ch_mutex in the order:
2343 * ch_mutex
2344 * c_mutex
2346 typedef struct idn_chaninfo {
2347 kmutex_t c_mutex;
2348 uchar_t c_state; /* protected by c_mutex */
2349 uchar_t c_checkin; /* asynchronous flag */
2350 kcondvar_t c_cv;
2351 ushort_t c_waiters; /* protected by c_mutex */
2352 ushort_t c_inprogress; /* protected by c_mutex */
2353 } idn_chaninfo_t;
2356 * Reading/Writing ch_state requires ch_mutex.
2357 * When updating both recv and send c_state's, the locks
2358 * must be grabbed in the following order:
2359 * ch_mutex
2360 * ch_recv.c_mutex
2361 * ch_send.c_mutex
2362 * This order is necessary to prevent deadlocks.
2363 * In general ch_state is intended to represent c_state of
2364 * individual send/recv sides. During state transitions the
2365 * ch_state and c_state values may be slightly different,
2366 * but eventually should end up identical.
2368 typedef struct idn_chansvr {
2369 uchar_t ch_id;
2370 uchar_t ch_state; /* protected by ch_mutex */
2371 lock_t ch_initlck;
2372 lock_t ch_actvlck;
2373 domainset_t ch_reg_domset;
2374 kmutex_t ch_mutex;
2376 idn_chaninfo_t ch_send;
2377 int _padding2[(64 -
2378 (2*sizeof (uchar_t)) - (2*sizeof (lock_t)) -
2379 sizeof (uint_t) - sizeof (kmutex_t) -
2380 sizeof (idn_chaninfo_t)) / sizeof (int)];
2382 idn_chaninfo_t ch_recv;
2384 uint64_t ch_recv_scanset;
2385 uint64_t ch_recv_scanset_pending;
2387 domainset_t ch_recv_domset;
2388 domainset_t ch_recv_domset_pending;
2389 short ch_recv_domcount;
2390 kcondvar_t ch_recv_cv;
2391 int ch_recv_waittime;
2392 int ch_recv_changed;
2394 kthread_id_t ch_recv_threadp;
2395 ksema_t *ch_recv_morguep;
2396 int ch_bound_cpuid;
2397 int ch_bound_cpuid_pending;
2398 } idn_chansvr_t;
2400 typedef struct idn_mainmbox {
2401 kmutex_t mm_mutex;
2402 short mm_channel;
2403 short mm_domid;
2404 ushort_t mm_flags;
2405 short mm_type;
2407 idn_chansvr_t *mm_csp; /* non-NULL indicates reg'd */
2408 int mm_count;
2409 int mm_dropped;
2410 idn_mboxtbl_t *mm_smr_mboxp; /* SMR vaddr */
2412 ushort_t *mm_smr_activep; /* SMR pointer */
2413 ushort_t *mm_smr_readyp; /* SMR pointer */
2414 int mm_qiget; /* next msg to get */
2415 int mm_qiput; /* next slot to put msg */
2416 } idn_mainmbox_t;
2419 * mm_flags
2421 #define IDNMMBOX_FLAG_CORRUPTED 0x01
2423 * mm_type
2425 #define IDNMMBOX_TYPE_RECV 0x1
2426 #define IDNMMBOX_TYPE_SEND 0x2
2428 #define IDNMBOX_IS_RECV(m) ((m) == IDNMMBOX_TYPE_RECV)
2429 #define IDNMBOX_IS_SEND(m) ((m) == IDNMMBOX_TYPE_SEND)
2432 * Period between sending wakeup xdc's to remote domain.
2434 #define IDN_CHANNEL_WAKEUP_PERIOD (hz >> 1)
2436 * ms_flag bit values.
2438 #define IDN_MBOXMSG_FLAG_RECLAIM 0x1 /* needs to be reclaimed */
2439 #define IDN_MBOXMSG_FLAG_INPROCESS 0x2
2440 #define IDN_MBOXMSG_FLAG_ERR_BADOFFSET 0x4
2441 #define IDN_MBOXMSG_FLAG_ERR_NOMBOX 0x8
2442 #define IDN_MBOXMSG_FLAG_ERRMASK 0xc
2444 * ch_state/c_state bit values.
2446 #define IDN_CHANSVC_STATE_ATTACHED 0x01
2447 #define IDN_CHANSVC_STATE_ENABLED 0x02
2448 #define IDN_CHANSVC_STATE_ACTIVE 0x04
2449 #define IDN_CHANSVC_STATE_FLUSH 0x10
2450 #define IDN_CHANSVC_STATE_CORRUPTED 0x20
2451 #define IDN_CHANSVC_STATE_MASK 0x07 /* ATTACHED/ENABLED/ACTIVE */
2453 #define IDN_CHANSVC_PENDING_BITS (IDN_CHANSVC_STATE_ATTACHED | \
2454 IDN_CHANSVC_STATE_ENABLED)
2457 * GLOBAL
2459 #define IDN_CHANNEL_IS_ATTACHED(csp) \
2460 ((csp)->ch_state & IDN_CHANSVC_STATE_ATTACHED)
2461 #define IDN_CHANNEL_IS_DETACHED(csp) \
2462 (!IDN_CHANNEL_IS_ATTACHED(csp))
2463 #define IDN_CHANNEL_IS_PENDING(csp) \
2464 (((csp)->ch_state & IDN_CHANSVC_STATE_MASK) == \
2465 IDN_CHANSVC_PENDING_BITS)
2466 #define IDN_CHANNEL_IS_ACTIVE(csp) \
2467 ((csp)->ch_state & IDN_CHANSVC_STATE_ACTIVE)
2468 #define IDN_CHANNEL_IS_ENABLED(csp) \
2469 ((csp)->ch_state & IDN_CHANSVC_STATE_ENABLED)
2471 * SEND
2473 #define IDN_CHANNEL_IS_SEND_ACTIVE(csp) \
2474 ((csp)->ch_send.c_state & IDN_CHANSVC_STATE_ACTIVE)
2476 * RECV
2478 #define IDN_CHANNEL_IS_RECV_ACTIVE(csp) \
2479 ((csp)->ch_recv.c_state & IDN_CHANSVC_STATE_ACTIVE)
2480 #define IDN_CHANNEL_IS_RECV_CORRUPTED(csp) \
2481 ((csp)->ch_recv.c_state & IDN_CHANSVC_STATE_CORRUPTED)
2484 #define IDN_CHAN_SEND_INPROGRESS(csp) ((csp)->ch_send.c_inprogress++)
2485 #define IDN_CHAN_SEND_DONE(csp) \
2487 ASSERT((csp)->ch_send.c_inprogress > 0); \
2488 if ((--((csp)->ch_send.c_inprogress) == 0) && \
2489 ((csp)->ch_send.c_waiters != 0)) \
2490 cv_broadcast(&(csp)->ch_send.c_cv); \
2492 #define IDN_CHAN_RECV_INPROGRESS(csp) ((csp)->ch_recv.c_inprogress++)
2493 #define IDN_CHAN_RECV_DONE(csp) \
2495 ASSERT((csp)->ch_recv.c_inprogress > 0); \
2496 if ((--((csp)->ch_recv.c_inprogress) == 0) && \
2497 ((csp)->ch_recv.c_waiters != 0)) \
2498 cv_broadcast(&(csp)->ch_recv.c_cv); \
2501 #define IDN_CHANSVC_MARK_ATTACHED(csp) \
2502 ((csp)->ch_state = IDN_CHANSVC_STATE_ATTACHED)
2503 #define IDN_CHANSVC_MARK_DETACHED(csp) \
2504 ((csp)->ch_state = 0)
2505 #define IDN_CHANSVC_MARK_PENDING(csp) \
2506 ((csp)->ch_state |= IDN_CHANSVC_STATE_ENABLED)
2507 #define IDN_CHANSVC_MARK_DISABLED(csp) \
2508 ((csp)->ch_state &= ~IDN_CHANSVC_STATE_ENABLED)
2509 #define IDN_CHANSVC_MARK_ACTIVE(csp) \
2510 ((csp)->ch_state |= IDN_CHANSVC_STATE_ACTIVE)
2511 #define IDN_CHANSVC_MARK_IDLE(csp) \
2512 ((csp)->ch_state &= ~IDN_CHANSVC_STATE_ACTIVE)
2514 #define IDN_CHANSVC_MARK_RECV_ACTIVE(csp) \
2515 ((csp)->ch_recv.c_state |= IDN_CHANSVC_STATE_ACTIVE)
2516 #define IDN_CHANSVC_MARK_RECV_CORRUPTED(csp) \
2517 ((csp)->ch_recv.c_state |= IDN_CHANSVC_STATE_CORRUPTED)
2518 #define IDN_CHANSVC_MARK_SEND_ACTIVE(csp) \
2519 ((csp)->ch_send.c_state |= IDN_CHANSVC_STATE_ACTIVE)
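/*
 * Illustrative sketch (not part of the driver); `csp' is a
 * placeholder channel server pointer.  In-progress activity is
 * bracketed with the INPROGRESS/DONE macros above, under the
 * respective c_mutex, so a thread suspending the channel is woken
 * once the last outstanding operation drains:
 *
 *	IDN_CHAN_LOCK_SEND(csp);
 *	IDN_CHAN_SEND_INPROGRESS(csp);
 *	IDN_CHAN_UNLOCK_SEND(csp);
 *	... do the send ...
 *	IDN_CHAN_LOCK_SEND(csp);
 *	IDN_CHAN_SEND_DONE(csp);
 *	IDN_CHAN_UNLOCK_SEND(csp);
 */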
2521 typedef enum {
2522 IDNCHAN_ACTION_DETACH, /* DETACH (ATTACHED = 0) */
2523 IDNCHAN_ACTION_STOP, /* DISABLE (ENABLED = 0) */
2524 IDNCHAN_ACTION_SUSPEND, /* IDLE (ACTIVE = 0) */
2525 IDNCHAN_ACTION_RESUME,
2526 IDNCHAN_ACTION_RESTART,
2527 IDNCHAN_ACTION_ATTACH
2528 } idn_chanaction_t;
2530 #define IDN_CHANNEL_SUSPEND(c, w) \
2531 (idn_chan_action((c), IDNCHAN_ACTION_SUSPEND, (w)))
2532 #define IDN_CHANNEL_RESUME(c) \
2533 (idn_chan_action((c), IDNCHAN_ACTION_RESUME, 0))
2534 #define IDN_CHANNEL_STOP(c, w) \
2535 (idn_chan_action((c), IDNCHAN_ACTION_STOP, (w)))
2536 #define IDN_CHANNEL_RESTART(c) \
2537 (idn_chan_action((c), IDNCHAN_ACTION_RESTART, 0))
2538 #define IDN_CHANNEL_DETACH(c, w) \
2539 (idn_chan_action((c), IDNCHAN_ACTION_DETACH, (w)))
2540 #define IDN_CHANNEL_ATTACH(c) \
2541 (idn_chan_action((c), IDNCHAN_ACTION_ATTACH, 0))
2544 * ds_waittime range values.
2545 * When a packet arrives the waittime starts at MIN and gradually
2546 * shifts up to MAX until another packet arrives. If still no
2547 * packet arrives, then we go to a hard sleep.
2549 #define IDN_NETSVR_SPIN_COUNT idn_netsvr_spin_count
2550 #define IDN_NETSVR_WAIT_MIN idn_netsvr_wait_min
2551 #define IDN_NETSVR_WAIT_MAX idn_netsvr_wait_max
2552 #define IDN_NETSVR_WAIT_SHIFT idn_netsvr_wait_shift
2555 * ---------------------------------------------------------------------
2556 * IDN Global Data
2558 * The comment to the right of the respective field represents
2559 * what lock protects that field. If there is no comment then
2560 * no lock is required to access the field.
2561 * ---------------------------------------------------------------------
2563 typedef struct idn_global { /* protected by... */
2564 krwlock_t grwlock;
2566 * Global state of IDN w.r.t.
2567 * the local domain.
2569 idn_gstate_t state; /* grwlock */
2571 * Version of the IDN driver.
2572 * Is passed in DMV header so that
2573 * other domains can validate they
2574 * support protocol used by local
2575 * domain.
2577 int version;
2579 * Set to 1 if SMR region properly
2580 * allocated and available.
2582 int enabled;
2584 * Local domain's "domain id".
2586 int localid;
2588 * Domain id of the Master domain.
2589 * Set to IDN_NIL_DOMID if none
2590 * currently exists.
2592 int masterid; /* grwlock */
2594 * Primarily used during Reconfiguration
2595 * to track the expected new Master.
2596 * Once the current IDN is dismantled
2597 * the local domain will attempt to
2598 * connect to this new domain.
2600 int new_masterid; /* grwlock */
2602 * Number of protocol servers configured.
2604 int nservers;
2606 dev_info_t *dip;
2608 struct {
2610 * dmv_inum
2611 * Interrupt number assigned by
2612 * DMV subsystem to IDN's DMV
2613 * handler.
2614 * soft_inum
2615 * Soft interrupt number assigned
2616 * by OS (add_softintr) for Soft
2617 * interrupt dispatched by DMV
2618 * handler.
2620 uint_t dmv_inum;
2621 uint64_t soft_inum;
2622 caddr_t dmv_data;
2623 size_t dmv_data_len;
2624 } intr;
2626 * first_swlink
2627 * Used as synchronization to
2628 * know whether channels need
2629 * to be activated or not.
2630 * first_hwlink
2631 * Used as mechanism to determine
2632 * whether local domain needs
2633 * to publicize its SMR, assuming
2634 * it is the Master.
2635 * first_hwmaster
2636 * Domainid of the domain that
2637 * was the master at the time
2638 * the hardware was programmed.
2639 * We need to keep this so that
2640 * we deprogram with respect to
2641 * the correct domain that the
2642 * hardware was originally
2643 * programmed to.
2645 lock_t first_swlink;
2646 lock_t first_hwlink;
2647 short first_hwmasterid;
2649 * The xmit* fields are used to set up a background
2650 * thread to monitor when a channel is ready to be
2651 * enabled again. This is necessary since IDN
2652 * can't rely on hardware to interrupt it when
2653 * things are ready to go. We need this ability
2654 * to wakeup our STREAMS queues.
2655 * Criteria for reenabling queues.
2656 * gstate == IDNGS_ONLINE
2657 * channel = !check-in
2658 * buffers are available
2660 * xmit_chanset_wanted
2661 * Indicates which channels wish to have
2662 * their queues reenabled when ready.
2663 * xmit_tid
2664 * Timeout-id of monitor.
2666 kmutex_t xmit_lock;
2667 idn_chanset_t xmit_chanset_wanted; /* xmit_lock */
2668 timeout_id_t xmit_tid; /* xmit_lock */
2670 struct {
2672 * ready
2673 * Indicates SMR region allocated
2674 * and available from OBP.
2675 * vaddr
2676 * Virtual address assigned to SMR.
2677 * locpfn
2678 * Page Frame Number associated
2679 * with local domain's SMR.
2680 * rempfn
2681 * Page Frame Number associated
2682 * with remote (Master) domain's SMR.
2683 * rempfnlim
2684 * PFN past end of remote domain's
2685 * SMR.
2686 * prom_paddr/prom_size
2687 * Physical address and size of
2688 * SMR that were assigned by OBP.
2690 int ready;
2691 caddr_t vaddr;
2692 pfn_t locpfn;
2693 pfn_t rempfn; /* grwlock */
2695 pfn_t rempfnlim; /* grwlock */
2696 uint64_t prom_paddr;
2698 uint64_t prom_size;
2699 } smr;
2702 * idnsb_mutex
2703 * Protects access to IDN's
2704 * sigblock area.
2705 * idnsb_eventp
2706 * IDN's private area in sigblock
2707 * used for signaling events
2708 * regarding IDN state to SSP.
2709 * idnsb
2710 * Area within IDN's private
2711 * sigblock area used for tracking
2712 * certain IDN state which might
2713 * be useful during arbstop
2714 * conditions (if caused by IDN!).
2716 kmutex_t idnsb_mutex;
2717 idnsb_event_t *idnsb_eventp;
2718 idnsb_t *idnsb;
2720 struct sigbintr {
2722 * sb_mutex
2723 * Protects sigbintr elements
2724 * to synchronize execution of
2725 * sigblock (IDN) mailbox handling.
2726 * sb_cpuid
2727 * Cpu whose sigblock mailbox
2728 * originally received IDN request
2729 * from SSP. Necessary to know
2730 * where to put response.
2731 * sb_busy
2732 * Flag indicating state of
2733 * sigblock handler thread.
2734 * Synchronize activity between
2735 * SSP and current IDN requests that
2736 * are in progress.
2737 * sb_cv
2738 * Condition variable for sigblock
2739 * handler thread to wait on.
2740 * sb_inum
2741 * Soft interrupt number assigned
2742 * by OS to handle soft interrupt
2743 * request made by the low-level (IDN)
2744 * sigblock handler to dispatch actual
2745 * processing of sigblock (mailbox)
2746 * request.
2748 kmutex_t sb_mutex;
2749 uchar_t sb_cpuid; /* sigbintr.sb_mutex */
2750 uchar_t sb_busy; /* sigbintr.sb_mutex */
2751 kcondvar_t sb_cv; /* sigbintr.sb_mutex */
2752 uint64_t sb_inum; /* sigbintr.sb_mutex */
2753 } sigbintr;
2756 * struprwlock, strup, sip, siplock
2757 * Standard network streams
2758 * handling structures to manage
2759 * instances of IDN driver.
2761 krwlock_t struprwlock;
2762 struct idnstr *strup; /* struprwlock */
2764 struct idn *sip; /* siplock */
2765 kmutex_t sipwenlock;
2766 kmutex_t siplock;
2769 * Area where IDN maintains its kstats.
2771 kstat_t *ksp;
2773 * Number of domains that local domain
2774 * has "open".
2776 int ndomains; /* grwlock */
2778 * Number of domains that local domain
2779 * has registered as non-responsive.
2781 int nawols; /* grwlock */
2783 * Number of network channels (interfaces)
2784 * which are currently active.
2786 int nchannels; /* grwlock */
2788 * Bitmask representing channels
2789 * that are currently active.
2791 idn_chanset_t chanset; /* grwlock */
2793 * Array of channel (network/data) servers
2794 * that have been created. Not necessarily
2795 * all active.
2797 idn_chansvr_t *chan_servers; /* elmts = ch_mutex */
2799 * Pointer to sigblock handler thread
2800 * which ultimately processes SSP
2801 * IDN requests.
2803 kthread_id_t sigb_threadp;
2805 * Pointer to area used by Master
2806 * to hold mailbox structures.
2807 * Actual memory is in SMR.
2809 idn_mboxtbl_t *mboxarea; /* grwlock */
2811 struct {
2813 * IDN_SYNC_LOCK - Provides serialization
2814 * mechanism when performing synchronous
2815 * operations across domains.
2817 kmutex_t sz_mutex;
2819 * Actual synchronization zones for
2820 * CONNECT/DISCONNECT phases.
2822 idn_synczone_t sz_zone[IDN_SYNC_NUMZONE];
2823 } sync; /* sz_mutex */
2825 struct {
2827 * ds_trans_on
2828 * Set of domains which are trying
2829 * to establish a link w/local.
2830 * ds_ready_on
2831 * Set of domains which local knows
2832 * are ready for linking, but has
2833 * not yet confirmed w/peers.
2834 * ds_connected
2835 * Set of domains that local has
2836 * confirmed as being ready.
2837 * ds_trans_off
2838 * Set of domains which are trying
2839 * to unlink from local.
2840 * ds_ready_off
2841 * Set of domains which local knows
2842 * are ready for unlink, but has
2843 * not yet confirmed w/peers.
2844 * ds_relink
2845 * Set of domains we're expecting
2846 * to relink with subsequent to
2847 * a RECONFIG (new master selection).
2848 * ds_hwlinked
2849 * Set of domains for which local
2850 * has programmed its hardware.
2851 * ds_flush
2852 * Set of domains requiring that
2853 * local flush its ecache prior
2854 * to unlinking.
2855 * ds_awol
2856 * Set of domains believed to be
2857 * AWOL - haven't responded to
2858 * any queries.
2859 * ds_hitlist
2860 * Set of domains which local domain
2861 * is unlinking from and wishes to ignore
2862 * any extraneous indirect link requests
2863 * from other domains, e.g. during a
2864 * Reconfig.
2866 domainset_t ds_trans_on; /* sz_mutex */
2867 domainset_t ds_ready_on; /* sz_mutex */
2869 domainset_t ds_connected; /* sz_mutex */
2870 domainset_t ds_trans_off; /* sz_mutex */
2872 domainset_t ds_ready_off; /* sz_mutex */
2873 domainset_t ds_relink; /* sz_mutex */
2875 domainset_t ds_hwlinked; /* sz_mutex */
2876 domainset_t ds_flush; /* sz_mutex */
2878 domainset_t ds_awol; /* sz_mutex */
2879 domainset_t ds_hitlist; /* sz_mutex */
2880 } domset;
2882 * Bitmask identifying all cpus in
2883 * the local IDN.
2885 cpuset_t dc_cpuset;
2887 * Bitmask identifying all boards in
2888 * the local IDN.
2890 boardset_t dc_boardset;
2892 struct dopers {
2894 * Waiting area for IDN requests,
2895 * i.e. links & unlinks. IDN requests
2896 * are performed asynchronously so
2897 * we need a place to wait until the
2898 * operation has completed.
2900 * dop_domset
2901 * Identifies which domains the
2902 * current waiter is interested in.
2903 * dop_waitcount
2904 * Number of waiters in the room.
2905 * dop_waitlist
2906 * Actual waiting area.
2907 * dop_freelist
2908 * Freelist (small cache) of
2909 * structs for waiting area.
2911 kmutex_t dop_mutex;
2912 kcondvar_t dop_cv; /* dop_mutex */
2913 domainset_t dop_domset; /* dop_mutex */
2914 int dop_waitcount; /* dop_mutex */
2915 dop_waitlist_t *dop_waitlist; /* dop_mutex */
2916 dop_waitlist_t *dop_freelist; /* dop_mutex */
2917 /* dop_mutex */
2918 dop_waitlist_t _dop_wcache[IDNOP_CACHE_SIZE];
2919 } *dopers;
2921 struct {
2923 * Protocol Server:
2925 * p_server
2926 * Linked list of queues
2927 * describing protocol
2928 * servers in use.
2929 * p_jobpool
2930 * Kmem cache of structs
2931 * used to enqueue protocol
2932 * jobs for protocol servers.
2933 * p_morgue
2934 * Synchronization (check-in)
2935 * area used when terminating
2936 * protocol servers (threads).
2938 struct idn_protoqueue *p_serverq;
2939 kmem_cache_t *p_jobpool;
2940 ksema_t p_morgue;
2941 } protocol;
2943 struct idn_retry_queue {
2945 * rq_jobs
2946 * Queue of Retry jobs
2947 * that are outstanding.
2948 * rq_count
2949 * Number of jobs on retry
2950 * queue.
2951 * rq_cache
2952 * Kmem cache for structs
2953 * used to describe retry
2954 * jobs.
2956 idn_retry_job_t *rq_jobs; /* rq_mutex */
2957 int rq_count; /* rq_mutex */
2958 kmutex_t rq_mutex; /* rq_mutex */
2960 kcondvar_t rq_cv; /* rq_mutex */
2961 kmem_cache_t *rq_cache;
2962 } retryqueue;
2964 struct slabpool {
2966 * Slabpool:
2968 * ntotslabs
2969 * Total number of slabs
2970 * in SMR (free & in-use).
2971 * npools
2972 * Number of pools available
2973 * in list. One smr_slabtbl
2974 * exists for each pool.
2976 int ntotslabs;
2977 int npools;
2978 struct smr_slabtbl {
2980 * sarray
2981 * Array of slab structs
2982 * representing slabs in SMR.
2983 * nfree
2984 * Number of slabs actually
2985 * available in sarray.
2986 * nslabs
2987 * Number of slabs represented
2988 * in sarray (free & in-use).
2990 smr_slab_t *sarray;
2991 int nfree;
2992 int nslabs;
2993 } *pool;
2995 * Holds array of smr_slab_t structs kmem_alloc'd
2996 * for slabpool.
2998 smr_slab_t *savep;
2999 } *slabpool;
3001 struct slabwaiter {
3003 * Waiting area for threads
3004 * requesting slab allocations.
3005 * Used by Slaves for all requests,
3006 * but used by Master only for
3007 * redundant requests, i.e. multiple
3008 * requests on behalf of the same
3009 * domain. One slabwaiter area
3010 * exists for each possible domain.
3012 * w_nwaiters
3013 * Number of threads waiting
3014 * in waiting area.
3015 * w_done
3016 * Flag to indicate that
3017 * allocation request has
3018 * completed.
3019 * w_serrno
3020 * Non-zero indicates an
3021 * errno value to represent
3022 * error that occurred during
3023 * attempt to allocate slab.
3024 * w_closed
3025 * Indicates that waiting area is
3026 * closed and won't allow any new
3027 * waiters. This occurs during
3028 * the small window where we're
3029 * trying to suspend a channel.
3030 * w_cv
3031 * Condvar for waiting on.
3032 * w_sp
3033 * Holds slab structure of
3034 * successfully allocated slab.
3036 kmutex_t w_mutex;
3037 short w_nwaiters; /* w_mutex */
3038 short w_done; /* w_mutex */
3039 short w_serrno; /* w_mutex */
3040 short w_closed; /* w_mutex */
3041 kcondvar_t w_cv; /* w_mutex */
3042 smr_slab_t *w_sp; /* w_mutex */
3043 } *slabwaiter;
3045 * Kmem cache used for allocating
3046 * timer structures for outstanding
3047 * IDN requests.
3049 kmem_cache_t *timer_cache;
3051 * Effectively constant used in
3052 * translating buffer frames in
3053 * mailbox message frames to
3054 * offsets within SMR.
3056 int bframe_shift;
3057 } idn_global_t;
3059 typedef struct idn_retry_queue idn_retry_queue_t;
3061 #define IDN_GET_MASTERID() (idn.masterid)
3062 #define IDN_SET_MASTERID(mid) \
3064 int _mid = (mid); \
3065 mutex_enter(&idn.idnsb_mutex); \
3066 if (idn.idnsb) { \
3067 idn.idnsb->id_pmaster_board = \
3068 idn.idnsb->id_master_board; \
3069 if (_mid == IDN_NIL_DOMID) \
3070 idn.idnsb->id_master_board = (uchar_t)0xff; \
3071 else \
3072 idn.idnsb->id_master_board = \
3073 (uchar_t)idn_domain[_mid].dvote.v.board; \
3075 mutex_exit(&idn.idnsb_mutex); \
3076 IDN_HISTORY_LOG(IDNH_MASTERID, _mid, idn.masterid, 0); \
3077 PR_STATE("%d: MASTERID %d -> %d\n", __LINE__, \
3078 idn.masterid, _mid); \
3079 idn.masterid = _mid; \
3081 #define IDN_GET_NEW_MASTERID() (idn.new_masterid)
3082 #define IDN_SET_NEW_MASTERID(mid) \
3084 PR_STATE("%d: NEW MASTERID %d -> %d\n", __LINE__, \
3085 idn.new_masterid, (mid)); \
3086 idn.new_masterid = (mid); \
3089 #define IDN_GLOCK_EXCL() (rw_enter(&idn.grwlock, RW_WRITER))
3090 #define IDN_GLOCK_SHARED() (rw_enter(&idn.grwlock, RW_READER))
3091 #define IDN_GLOCK_TRY_SHARED() (rw_tryenter(&idn.grwlock, RW_READER))
3092 #define IDN_GLOCK_DOWNGRADE() (rw_downgrade(&idn.grwlock))
3093 #define IDN_GUNLOCK() (rw_exit(&idn.grwlock))
3094 #define IDN_GLOCK_IS_EXCL() (RW_WRITE_HELD(&idn.grwlock))
3095 #define IDN_GLOCK_IS_SHARED() (RW_READ_HELD(&idn.grwlock))
3096 #define IDN_GLOCK_IS_HELD() (RW_LOCK_HELD(&idn.grwlock))
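/*
 * Illustrative sketch (not part of the driver); `masterid' and
 * `newmid' are placeholder locals.  Fields of the global structure
 * tagged "grwlock" are read under the shared lock and modified under
 * the exclusive lock:
 *
 *	IDN_GLOCK_SHARED();
 *	masterid = IDN_GET_MASTERID();
 *	IDN_GUNLOCK();
 *
 *	IDN_GLOCK_EXCL();
 *	IDN_SET_MASTERID(newmid);
 *	IDN_GUNLOCK();
 */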
3098 #define IDN_SYNC_LOCK() (mutex_enter(&idn.sync.sz_mutex))
3099 #define IDN_SYNC_TRYLOCK() (mutex_tryenter(&idn.sync.sz_mutex))
3100 #define IDN_SYNC_UNLOCK() (mutex_exit(&idn.sync.sz_mutex))
3101 #define IDN_SYNC_IS_LOCKED() (MUTEX_HELD(&idn.sync.sz_mutex))
3104 * Macro to reset some globals necessary in preparing
3105 * for initialization of HW for IDN.
3107 #define IDN_PREP_HWINIT() \
3109 ASSERT(IDN_GLOCK_IS_EXCL()); \
3110 lock_clear(&idn.first_swlink); \
3111 lock_clear(&idn.first_hwlink); \
3112 idn.first_hwmasterid = (short)IDN_NIL_DOMID; \
3116 * Return values of idn_send_data.
3118 #define IDNXMIT_OKAY 0 /* xmit successful */
3119 #define IDNXMIT_LOOP 1 /* loopback */
3120 #define IDNXMIT_DROP 2 /* drop packet */
3121 #define IDNXMIT_RETRY 3 /* retry packet (requeue and qenable) */
3122 #define IDNXMIT_REQUEUE 4 /* requeue packet, but don't qenable */
3125 * ---------------------------------------------------------------------
3126 * ss_rwlock must be acquired _before_ any idn_domain locks are
3127 * acquired if both structs need to be accessed.
3128 * idn.struprwlock is acquired when traversing IDN's strup list
3129 * and when adding or deleting entries.
3131 * ss_nextp Linked list of streams.
3132 * ss_rq Respective read queue.
3133 * ss_sip Attached device.
3134 * ss_state Current DL state.
3135 * ss_sap Bound SAP.
3136 * ss_flags Misc. flags.
3137 * ss_mccount # enabled multicast addrs.
3138 * ss_mctab Table of multicast addrs.
3139 * ss_minor Minor device number.
3140 * ss_rwlock Protects ss_linkup fields and DLPI state machine.
3141 * ss_linkup Boolean flag indicating whether particular (domain) link
3142 * is up.
3143 * ---------------------------------------------------------------------
3145 struct idnstr { /* gets shoved into q_ptr */
3146 struct idnstr *ss_nextp;
3147 queue_t *ss_rq;
3148 struct idn *ss_sip;
3149 t_uscalar_t ss_state;
3150 t_uscalar_t ss_sap;
3151 uint_t ss_flags;
3152 uint_t ss_mccount;
3153 struct ether_addr *ss_mctab;
3154 minor_t ss_minor;
3155 krwlock_t ss_rwlock;
3159 * idnstr.ss_flags - Per-stream flags
3161 #define IDNSFAST 0x01 /* "M_DATA fastpath" mode */
3162 #define IDNSRAW 0x02 /* M_DATA plain raw mode */
3163 #define IDNSALLPHYS 0x04 /* "promiscuous mode" */
3164 #define IDNSALLMULTI 0x08 /* enable all multicast addresses */
3165 #define IDNSALLSAP 0x10 /* enable all ether type values */
3168 * Maximum number of multicast addresses per stream.
3170 #define IDNMAXMC 64
3171 #define IDNMCALLOC (IDNMAXMC * sizeof (struct ether_addr))
3174 * Full DLSAP address length (in struct dladdr format).
3176 #define IDNADDRL (ETHERADDRL + sizeof (ushort_t))
3178 struct idndladdr {
3179 struct ether_addr dl_phys;
3180 ushort_t dl_sap;
3183 #define IDNHEADROOM 64
3184 #define IDNROUNDUP(a, n) (((a) + ((n) - 1)) & ~((n) - 1))
3187 * Respective interpretation of bytes in 6 byte ethernet address.
3189 #define IDNETHER_ZERO 0
3190 #define IDNETHER_COOKIE1 1
3191 #define IDNETHER_COOKIE1_VAL 0xe5
3192 #define IDNETHER_COOKIE2 2
3193 #define IDNETHER_COOKIE2_VAL 0x82
3194 #define IDNETHER_NETID 3
3195 #define IDNETHER_CHANNEL 4
3196 #define IDNETHER_RESERVED 5
3197 #define IDNETHER_RESERVED_VAL 0x64
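/*
 * Illustrative sketch (not part of the driver); `ea', `domid' and
 * `channel' are placeholder locals.  Filling in the six bytes of an
 * IDN ethernet address according to the layout above
 * (IDN_DOMID2NETID() is defined further below):
 *
 *	struct ether_addr	ea;
 *
 *	ea.ether_addr_octet[IDNETHER_ZERO] = 0;
 *	ea.ether_addr_octet[IDNETHER_COOKIE1] = IDNETHER_COOKIE1_VAL;
 *	ea.ether_addr_octet[IDNETHER_COOKIE2] = IDNETHER_COOKIE2_VAL;
 *	ea.ether_addr_octet[IDNETHER_NETID] = (uchar_t)IDN_DOMID2NETID(domid);
 *	ea.ether_addr_octet[IDNETHER_CHANNEL] = (uchar_t)channel;
 *	ea.ether_addr_octet[IDNETHER_RESERVED] = IDNETHER_RESERVED_VAL;
 */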
3200 * The IDN driver supports multiple instances; however, they
3201 * still all refer to the same "physical" device. Multiple
3202 * instances are supported primarily to allow increased
3203 * STREAMS bandwidth since each instance has its own IP queue.
3204 * This structure is primarily defined to be consistent with
3205 * other network drivers and also to hold the kernel stats.
3207 struct idn_kstat {
3208 ulong_t si_ipackets; /* # packets received */
3209 ulong_t si_ierrors; /* # total input errors */
3210 ulong_t si_opackets; /* # packets sent */
3211 ulong_t si_oerrors; /* # total output errors */
3213 ulong_t si_txcoll; /* # xmit collisions */
3214 ulong_t si_rxcoll; /* # recv collisions */
3215 ulong_t si_crc; /* # recv crc errors */
3216 ulong_t si_buff; /* # recv pkt sz > buf sz */
3218 ulong_t si_nolink; /* # loss of connection */
3219 ulong_t si_linkdown; /* # link is down */
3220 ulong_t si_inits; /* # driver inits */
3221 ulong_t si_nocanput; /* # canput() failures */
3223 ulong_t si_allocbfail; /* # allocb() failures */
3224 ulong_t si_notbufs; /* # out of xmit buffers */
3225 ulong_t si_reclaim; /* # reclaim failures */
3226 ulong_t si_smraddr; /* # bad SMR addrs */
3228 ulong_t si_txmax; /* # xmit over limit */
3229 ulong_t si_txfull; /* # xmit mbox full */
3230 ulong_t si_xdcall; /* # xdcalls sent */
3231 ulong_t si_sigsvr; /* # data server wakeups */
3233 ulong_t si_mboxcrc; /* # send mbox crc errors */
3235 * MIB II kstat variables
3237 ulong_t si_rcvbytes; /* # bytes received */
3238 ulong_t si_xmtbytes; /* # bytes transmitted */
3239 ulong_t si_multircv; /* # multicast packets received */
3241 ulong_t si_multixmt; /* # multicast packets for xmit */
3242 ulong_t si_brdcstrcv; /* # broadcast packets received */
3243 ulong_t si_brdcstxmt; /* # broadcast packets for xmit */
3244 ulong_t si_norcvbuf; /* # rcv packets discarded */
3246 ulong_t si_noxmtbuf; /* # xmit packets discarded */
3248 * PSARC 1997/198 : 64 bit kstats
3250 uint64_t si_ipackets64; /* # packets received */
3251 uint64_t si_opackets64; /* # packets transmitted */
3252 uint64_t si_rbytes64; /* # bytes received */
3253 uint64_t si_obytes64; /* # bytes transmitted */
3255 * PSARC 1997/247 : RFC 1643 dot3Stats...
3257 ulong_t si_fcs_errors; /* FCSErrors */
3258 ulong_t si_macxmt_errors; /* InternalMacTransmitErrors */
3259 ulong_t si_toolong_errors; /* FrameTooLongs */
3260 ulong_t si_macrcv_errors; /* InternalMacReceiveErrors */
3264 * Per logical interface private data structure.
3266 struct idn {
3267 struct idn *si_nextp; /* linked instances */
3268 dev_info_t *si_dip; /* assoc. dev_info */
3269 struct ether_addr si_ouraddr; /* enet address */
3271 uint_t si_flags; /* misc. flags */
3272 uint_t si_wantw; /* xmit: out of res. */
3273 queue_t *si_ip4q; /* ip (v4) read queue */
3274 queue_t *si_ip6q; /* ip (v6) read queue */
3276 kstat_t *si_ksp; /* kstat pointer */
3277 struct idn_kstat si_kstat; /* per-inst kstat */
3280 struct idn_gkstat {
3281 ulong_t gk_reconfigs; /* # reconfigs */
3282 ulong_t gk_reconfig_last; /* timestamp */
3283 ulong_t gk_reaps; /* # of reap requests */
3284 ulong_t gk_reap_last; /* timestamp */
3286 ulong_t gk_links; /* # of IDN links */
3287 ulong_t gk_link_last; /* timestamp */
3288 ulong_t gk_unlinks; /* # of IDN unlinks */
3289 ulong_t gk_unlink_last; /* timestamp */
3291 ulong_t gk_buffail; /* # bad bufalloc */
3292 ulong_t gk_buffail_last; /* timestamp */
3293 ulong_t gk_slabfail; /* # bad slaballoc */
3294 ulong_t gk_slabfail_last; /* timestamp */
3296 ulong_t gk_reap_count; /* # of slabs reaped */
3297 ulong_t gk_dropped_intrs; /* dropped intrs */
3300 extern struct idn_gkstat sg_kstat;
3302 #ifdef IDN_NO_KSTAT
3304 #define IDN_KSTAT_INC(s, i)
3305 #define IDN_KSTAT_ADD(s, i, n)
3306 #define IDN_GKSTAT_INC(i)
3307 #define IDN_GKSTAT_ADD(vvv, iii)
3308 #define IDN_GKSTAT_GLOBAL_EVENT(vvv, nnn)
3310 #else /* IDN_NO_KSTAT */
3312 #define IDN_KSTAT_INC(sss, vvv) \
3313 ((((struct idn *)(sss))->si_kstat.vvv)++)
3314 #define IDN_KSTAT_ADD(sss, vvv, nnn) \
3315 ((((struct idn *)(sss))->si_kstat.vvv) += (nnn))
3316 #define IDN_GKSTAT_INC(vvv) ((sg_kstat.vvv)++)
3317 #define IDN_GKSTAT_ADD(vvv, iii) ((sg_kstat.vvv) += (iii))
3318 #define IDN_GKSTAT_GLOBAL_EVENT(vvv, ttt) \
3319 ((sg_kstat.vvv)++, ((sg_kstat.ttt) = ddi_get_lbolt()))
3321 #endif /* IDN_NO_KSTAT */
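/*
 * Illustrative sketch (not part of the driver); `sip' is the
 * per-instance struct idn pointer and `pktlen' a placeholder byte
 * count.  Per-instance counters are bumped with the IDN_KSTAT_*
 * macros above, while global events also record a timestamp:
 *
 *	IDN_KSTAT_INC(sip, si_ipackets);
 *	IDN_KSTAT_ADD(sip, si_rcvbytes, pktlen);
 *
 *	IDN_GKSTAT_GLOBAL_EVENT(gk_links, gk_link_last);
 */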
3324 * idn.si_flags
3326 #define IDNRUNNING 0x01 /* IDNnet is UP */
3327 #define IDNPROMISC 0x02 /* promiscuous mode enabled */
3328 #define IDNSUSPENDED 0x04 /* suspended (DR) */
3330 typedef struct kstat_named kstate_named_t;
3332 struct idn_kstat_named {
3333 kstat_named_t sk_ipackets; /* # packets received */
3334 kstat_named_t sk_ierrors; /* # total input errors */
3335 kstat_named_t sk_opackets; /* # packets sent */
3336 kstat_named_t sk_oerrors; /* # total output errors */
3338 kstat_named_t sk_txcoll; /* # xmit collisions */
3339 kstat_named_t sk_rxcoll; /* # recv collisions */
3340 kstat_named_t sk_crc; /* # recv crc errors */
3341 kstat_named_t sk_buff; /* # recv pkt sz > buf sz */
3343 kstat_named_t sk_nolink; /* # loss of connection */
3344 kstat_named_t sk_linkdown; /* # link is down */
3345 kstat_named_t sk_inits; /* # driver inits */
3346 kstat_named_t sk_nocanput; /* # canput() failures */
3348 kstat_named_t sk_allocbfail; /* # allocb() failures */
3349 kstat_named_t sk_notbufs; /* # out of xmit buffers */
3350 kstat_named_t sk_reclaim; /* # reclaim failures */
3351 kstat_named_t sk_smraddr; /* # bad SMR addrs */
3353 kstat_named_t sk_txmax; /* # xmit over limit */
3354 kstat_named_t sk_txfull; /* # xmit mbox full */
3355 kstat_named_t sk_xdcall; /* # xdcalls sent */
3356 kstat_named_t sk_sigsvr; /* # data server wakeups */
3358 kstat_named_t sk_mboxcrc; /* # send mbox crc errors */
3360 * MIB II kstat variables
3362 kstat_named_t sk_rcvbytes; /* # bytes received */
3363 kstat_named_t sk_xmtbytes; /* # bytes transmitted */
3364 kstat_named_t sk_multircv; /* # multicast packets received */
3366 kstat_named_t sk_multixmt; /* # multicast packets for xmit */
3367 kstat_named_t sk_brdcstrcv; /* # broadcast packets received */
3368 kstat_named_t sk_brdcstxmt; /* # broadcast packets for xmit */
3369 kstat_named_t sk_norcvbuf; /* # rcv packets discarded */
3371 kstat_named_t sk_noxmtbuf; /* # xmit packets discarded */
3373 * PSARC 1997/198 : 64bit kstats
3375 kstat_named_t sk_ipackets64; /* # packets received */
3376 kstat_named_t sk_opackets64; /* # packets transmitted */
3377 kstat_named_t sk_rbytes64; /* # bytes received */
3378 kstat_named_t sk_obytes64; /* # bytes transmitted */
3380 * PSARC 1997/247 : RFC 1643 dot3Stats...
3382 kstat_named_t sk_fcs_errors; /* FCSErr */
3383 kstat_named_t sk_macxmt_errors; /* InternalMacXmtErr */
3384 kstat_named_t sk_toolong_errors; /* FrameTooLongs */
3385 kstat_named_t sk_macrcv_errors; /* InternalMacRcvErr */
3389 * Stats for global events of interest (non-counters).
3391 struct idn_gkstat_named {
3392 kstat_named_t sk_curtime; /* current time */
3393 kstat_named_t sk_reconfigs; /* # master recfgs */
3394 kstat_named_t sk_reconfig_last; /* timestamp */
3395 kstat_named_t sk_reaps; /* # of reap req */
3396 kstat_named_t sk_reap_last; /* timestamp */
3397 kstat_named_t sk_links; /* # of links */
3398 kstat_named_t sk_link_last; /* timestamp */
3399 kstat_named_t sk_unlinks; /* # of unlinks */
3400 kstat_named_t sk_unlink_last; /* timestamp */
3401 kstat_named_t sk_buffail; /* # bad buf alloc */
3403 kstat_named_t sk_slabfail; /* # bad slab alloc */
3403 kstat_named_t sk_slabfail; /* # bad buf alloc */
3404 kstat_named_t sk_slabfail_last; /* timestamp */
3405 kstat_named_t sk_reap_count; /* # slabs reaped */
3406 kstat_named_t sk_dropped_intrs; /* intrs dropped */
3410 * ---------------------------------------------------------------------
3412 #ifdef DEBUG
3413 #define IDNXDC(d, mt, a1, a2, a3, a4) \
3414 ((void) debug_idnxdc("idnxdc", (int)(d), (mt), \
3415 (uint_t)(a1), (uint_t)(a2), (uint_t)(a3), (uint_t)(a4)))
3416 #else /* DEBUG */
3417 #define IDNXDC(d, mt, a1, a2, a3, a4) \
3418 (idnxdc((int)(d), (mt), \
3419 (uint_t)(a1), (uint_t)(a2), (uint_t)(a3), (uint_t)(a4)))
3420 #endif /* DEBUG */
3421 #define IDNXDC_BROADCAST(ds, mt, a1, a2, a3, a4) \
3422 (idnxdc_broadcast((domainset_t)(ds), (mt), \
3423 (uint_t)(a1), (uint_t)(a2), (uint_t)(a3), (uint_t)(a4)))
3426 * ---------------------------------------------------------------------
3428 #define SET_XARGS(x, a0, a1, a2, a3) \
3429 ((x)[0] = (uint_t)(a0), (x)[1] = (uint_t)(a1), \
3430 (x)[2] = (uint_t)(a2), (x)[3] = (uint_t)(a3))
3432 #define GET_XARGS(x, a0, a1, a2, a3) \
3433 ((*(uint_t *)(a0) = (x)[0]), \
3434 (*(uint_t *)(a1) = (x)[1]), \
3435 (*(uint_t *)(a2) = (x)[2]), \
3436 (*(uint_t *)(a3) = (x)[3]))
3438 #define CLR_XARGS(x) \
3439 ((x)[0] = (x)[1] = (x)[2] = (x)[3] = 0)
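/*
 * Illustrative sketch (not part of the driver); `xargs', `arg0..3'
 * and `a0..3' are placeholder locals.  Cross-domain call arguments
 * travel as a four-word array; the sender packs it and the receiver
 * unpacks it:
 *
 *	idn_xdcargs_t	xargs;
 *	uint_t		a0, a1, a2, a3;
 *
 *	CLR_XARGS(xargs);
 *	SET_XARGS(xargs, arg0, arg1, arg2, arg3);
 *	...
 *	GET_XARGS(xargs, &a0, &a1, &a2, &a3);
 */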
3441 #define GET_XARGS_NEGO_TICKET(x) ((uint_t)(x)[0])
3442 #define GET_XARGS_NEGO_DSET(x, d) \
3443 ((d)[0] = (x)[1], (d)[1] = (x)[2], (d)[2] = (x)[3])
3444 #define SET_XARGS_NEGO_TICKET(x, t) ((x)[0] = (uint_t)(t))
3445 #define SET_XARGS_NEGO_DSET(x, d) \
3446 ((x)[1] = (uint_t)(d)[0], \
3447 (x)[2] = (uint_t)(d)[1], \
3448 (x)[3] = (uint_t)(d)[2])
3450 #define GET_XARGS_CON_TYPE(x) ((idn_con_t)(x)[0])
3451 #define GET_XARGS_CON_DOMSET(x) ((domainset_t)(x)[1])
3452 #define SET_XARGS_CON_TYPE(x, t) ((x)[0] = (uint_t)(t))
3453 #define SET_XARGS_CON_DOMSET(x, s) ((x)[1] = (uint_t)(s))
3455 #define GET_XARGS_FIN_TYPE(x) GET_FIN_TYPE((x)[0])
3456 #define GET_XARGS_FIN_ARG(x) GET_FIN_ARG((x)[0])
3457 #define GET_XARGS_FIN_DOMSET(x) ((domainset_t)(x)[1])
3458 #define GET_XARGS_FIN_OPT(x) ((idn_finopt_t)(x)[2])
3459 #define GET_XARGS_FIN_MASTER(x) ((uint_t)(x)[3])
3460 #define SET_XARGS_FIN_TYPE(x, t) SET_FIN_TYPE((x)[0], (t))
3461 #define SET_XARGS_FIN_ARG(x, a) SET_FIN_ARG((x)[0], (a))
3462 #define SET_XARGS_FIN_DOMSET(x, s) ((x)[1] = (uint_t)(s))
3463 #define SET_XARGS_FIN_OPT(x, o) ((x)[2] = (uint_t)(o))
3464 #define SET_XARGS_FIN_MASTER(x, m) ((x)[3] = (uint_t)(m))
3466 #define GET_XARGS_NACK_TYPE(x) ((idn_nack_t)(x)[0])
3467 #define GET_XARGS_NACK_ARG1(x) ((x)[1])
3468 #define GET_XARGS_NACK_ARG2(x) ((x)[2])
3469 #define SET_XARGS_NACK_TYPE(x, t) ((x)[0] = (uint_t)(t))
3470 #define SET_XARGS_NACK_ARG1(x, a1) ((x)[1] = (uint_t)(a1))
3471 #define SET_XARGS_NACK_ARG2(x, a2) ((x)[2] = (uint_t)(a2))
3473 #define GET_XARGS_CFG_PHASE(x) ((int)(x)[0])
3474 #define SET_XARGS_CFG_PHASE(x, p) ((x)[0] = (uint_t)(p))
3477 * ---------------------------------------------------------------------
3480 * Device instance to SIP (IDN instance pointer).
3482 #ifdef DEBUG
3483 #define IDN_INST2SIP(i) \
3484 (ASSERT(((i) >= 0) && ((i) < (IDN_MAXMAX_NETS << 1))), \
3485 idn_i2s_table[i])
3486 #else /* DEBUG */
3487 #define IDN_INST2SIP(i) (idn_i2s_table[i])
3488 #endif /* DEBUG */
3490 #define IDN_SET_INST2SIP(i, s) \
3492 ASSERT(((i) >= 0) && ((i) < (IDN_MAXMAX_NETS << 1))); \
3493 idn_i2s_table[i] = (s); \
3496 #define IDN_NETID2DOMID(n) (VALID_UDOMAINID(n) ? \
3497 ((int)(n)) : IDN_NIL_DOMID)
3498 #define IDN_DOMID2NETID(d) ((ushort_t)(d))
3500 #ifdef DEBUG
3501 #define IDNDL_ETHER2DOMAIN(eap) \
3502 (_idndl_ether2domain(eap))
3503 #define IDNDL_ETHER2SIP(eap) \
3504 (_idndl_ether2sip(eap))
3505 #else
3507 * The following values can be returned from IDNDL_ETHER2DOMAIN:
3508 * IDN_NIL_DOMID
3509 * Ether address is broadcast (0xff) or domain doesn't exist.
3510 * domid Domain id with drwlock(reader) held.
3512 #define IDNDL_ETHER2DOMAIN(eap) \
3513 (IDN_NETID2DOMID((eap)->ether_addr_octet[IDNETHER_NETID]))
3514 #define IDNDL_ETHER2SIP(eap) \
3515 (((eap)->ether_addr_octet[IDNETHER_CHANNEL] == 0xff) ? NULL : \
3516 IDN_INST2SIP((int)(eap)->ether_addr_octet[IDNETHER_CHANNEL]))
3517 #endif /* DEBUG */
3519 #define UPPER32_CPUMASK(s) _upper32cpumask(s)
3520 #define LOWER32_CPUMASK(s) _lower32cpumask(s)
3521 #define MAKE64_CPUMASK(s, u, l) _make64cpumask(&(s), (u), (l))
3523 #ifdef DEBUG
3524 extern caddr_t _idn_getstruct(char *structname, int size);
3525 extern void _idn_freestruct(caddr_t ptr, char *structname, int size);
3527 #define GETSTRUCT(structure, num) \
3528 ((structure *)_idn_getstruct("structure", sizeof (structure)*(num)))
3529 #define FREESTRUCT(ptr, structure, num) \
3530 (_idn_freestruct((caddr_t)ptr, "structure", sizeof (structure)*(num)))
3531 #else /* DEBUG */
3532 #define GETSTRUCT(structure, num) \
3533 ((structure *)kmem_zalloc((uint_t)(sizeof (structure) * (num)), \
3534 KM_SLEEP))
3535 #define FREESTRUCT(ptr, structure, num) \
3536 (kmem_free((caddr_t)(ptr), sizeof (structure) * (num)))
3537 #endif /* DEBUG */
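/*
 * Illustrative sketch (not part of the driver); `tp' and `count' are
 * placeholder locals.  GETSTRUCT/FREESTRUCT wrap allocation and
 * freeing of an array of structures; the DEBUG variants also pass
 * the structure name to the debug allocator:
 *
 *	idn_timer_t	*tp;
 *
 *	tp = GETSTRUCT(idn_timer_t, count);
 *	...
 *	FREESTRUCT(tp, idn_timer_t, count);
 */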

extern int		idn_debug;
extern idn_global_t	idn;
extern idn_domain_t	idn_domain[];
extern struct idn	*idn_i2s_table[];
extern int		idn_history;
extern struct idn_history	idnhlog;

extern int		idn_smr_size;
extern int		idn_nwr_size;
extern int		idn_protocol_nservers;
extern int		idn_awolmsg_interval;
extern int		idn_smr_bufsize;
extern int		idn_slab_bufcount;
extern int		idn_slab_prealloc;
extern int		idn_slab_mintotal;
extern int		idn_window_max;
extern int		idn_window_incr;
extern int		idn_reclaim_min;
extern int		idn_reclaim_max;
extern int		idn_mbox_per_net;
extern int		idn_max_nets;

extern int		idn_netsvr_spin_count;
extern int		idn_netsvr_wait_min;
extern int		idn_netsvr_wait_max;
extern int		idn_netsvr_wait_shift;

extern int		idn_checksum;

extern int		idn_msgwait_nego;
extern int		idn_msgwait_cfg;
extern int		idn_msgwait_con;
extern int		idn_msgwait_fin;
extern int		idn_msgwait_cmd;
extern int		idn_msgwait_data;

extern int		idn_retryfreq_nego;
extern int		idn_retryfreq_con;
extern int		idn_retryfreq_fin;

extern int		idn_window_emax;	/* calculated */
extern int		idn_slab_maxperdomain;	/* calculated */

/*
 * ---------------------------------------------------------------------
 * io/idn.c
 * ---------------------------------------------------------------------
 */
extern int	board_to_ready_cpu(int board, cpuset_t cpuset);
extern int	idn_open_domain(int domid, int cpuid, uint_t ticket);
extern void	idn_close_domain(int domid);
extern void	inum2str(uint_t inum, char str[]);
extern idn_timer_t	*idn_timer_alloc();
extern void	idn_timer_free(idn_timer_t *tp);
extern void	idn_timerq_init(idn_timerq_t *tq);
extern void	idn_timerq_deinit(idn_timerq_t *tq);
extern void	idn_timerq_free(idn_timerq_t *tq);
extern ushort_t	idn_timer_start(idn_timerq_t *tq, idn_timer_t *tp,
			clock_t tval);
extern int	idn_timer_stopall(idn_timer_t *tp);
extern void	idn_timer_dequeue(idn_timerq_t *tq, idn_timer_t *tp);
extern void	idn_timer_stop(idn_timerq_t *tq, int subtype, ushort_t tcookie);
extern idn_timer_t	*idn_timer_get(idn_timerq_t *tq, int subtype,
			ushort_t tcookie);
extern void	idn_domain_resetentry(idn_domain_t *dp);
extern void	idn_strlinks_enable(uint_t netaddr, int domid);
extern void	idn_strlinks_disable(uint_t domset, uint_t netaddr,
			int disconnect);
extern void	idn_dopcache_init();
extern void	idn_dopcache_deinit();
extern void	*idn_init_op(idn_opflag_t opflag, boardset_t boardset,
			idnsb_error_t *sep);
extern void	idn_add_op(idn_opflag_t opflag, domainset_t domset);
extern void	idn_update_op(idn_opflag_t opflag, domainset_t domset,
			idnsb_error_t *sep);
extern void	idn_deinit_op(void *cookie);
extern int	idn_wait_op(void *cookie, boardset_t *domsetp,
			int wait_timeout);
extern int	idn_wakeup_op(boardset_t boardset, uint_t domset,
			idn_opflag_t opflag, int error);
extern void	idn_error_op(uint_t domset, boardset_t boardset, int error);
extern void	cpuset2str(cpuset_t cset, char buffer[]);
extern void	domainset2str(domainset_t dset, char buffer[]);
extern void	boardset2str(boardset_t bset, char buffer[]);
extern void	mask2str(uint_t mask, char buffer[], int maxnum);
extern int	idnxdc(int domid, idn_msgtype_t *mtp,
			uint_t arg1, uint_t arg2,
			uint_t arg3, uint_t arg4);
extern void	idnxdc_broadcast(domainset_t domset, idn_msgtype_t *mtp,
			uint_t arg1, uint_t arg2,
			uint_t arg3, uint_t arg4);
extern void	idn_awol_event_set(boardset_t boardset);
extern void	idn_awol_event_clear(boardset_t boardset);
#ifdef DEBUG
extern int	debug_idnxdc(char *f, int domid, idn_msgtype_t *mtp,
			uint_t arg1, uint_t arg2,
			uint_t arg3, uint_t arg4);
#endif /* DEBUG */
extern boardset_t	cpuset2boardset(cpuset_t portset);
extern uint_t	_upper32cpumask(cpuset_t cset);
extern uint_t	_lower32cpumask(cpuset_t cset);
extern void	_make64cpumask(cpuset_t *csetp, uint_t upper, uint_t lower);

/*
 * ---------------------------------------------------------------------
 * io/idn_proto.c
 * ---------------------------------------------------------------------
 */
extern void	idn_assign_cookie(int domid);
extern int	idn_rput_data(queue_t *q, mblk_t *mp, int isput);
extern int	idn_wput_data(queue_t *q, mblk_t *mp, int isput);
extern int	idn_send_data(int dst_domid, idn_netaddr_t dst_netaddr,
			queue_t *wq, mblk_t *mp);
extern void	idn_recv_signal(mblk_t *mp);
extern int	idn_link(int domid, int cpuid, int pri, int waittime,
			idnsb_error_t *sep);
extern int	idn_unlink(int domid, boardset_t idnset, idn_fin_t fintype,
			idn_finopt_t finopt, int waittime,
			idnsb_error_t *sep);
extern int	idnh_recv_dataack(int domid, int src_proc,
			uint_t acknack, idn_xdcargs_t xargs);
extern int	idnh_recv_other(int sourceid, int src_proc, int dst_proc,
			uint_t inum, uint_t acknack,
			idn_xdcargs_t xargs);
extern void	idn_send_cmd(int domid, idn_cmd_t cmdtype,
			uint_t arg1, uint_t arg2, uint_t arg3);
extern void	idn_send_cmdresp(int domid, idn_msgtype_t *mtp,
			idn_cmd_t cmdtype, uint_t arg1,
			uint_t arg2, uint_t cerrno);
extern void	idn_broadcast_cmd(idn_cmd_t cmdtype,
			uint_t arg1, uint_t arg2, uint_t arg3);
extern int	idn_reclaim_mboxdata(int domid, int channel, int nbufs);
extern void	idn_clear_awol(int domid);
extern int	idn_protocol_init(int nservers);
extern void	idn_protocol_deinit();
extern void	idn_timer_expired(void *arg);
extern int	idn_open_channel(int channel);
extern void	idn_close_channel(int channel, idn_chanop_t chanop);
extern idn_mainmbox_t	*idn_mainmbox_init(int domid, int mbx);
extern void	idn_mainmbox_deinit(int domid, idn_mainmbox_t *mmp);
extern void	idn_signal_data_server(int domid, ushort_t channel);
extern int	idn_chanservers_init();
extern void	idn_chanservers_deinit();
extern void	idn_chanserver_bind(int net, int cpuid);
extern int	idn_retry_terminate(uint_t token);
extern idn_protojob_t	*idn_protojob_alloc(int kmflag);
extern void	idn_protojob_submit(int cookie, idn_protojob_t *jp);
extern int	idn_domain_is_registered(int domid, int channel,
			idn_chanset_t *chansetp);
extern void	idn_xmit_monitor_kickoff(int chan_wanted);
extern void	idn_sync_exit(int domid, idn_synccmd_t cmd);
/*
 * ---------------------------------------------------------------------
 * io/idn_xf.c
 * ---------------------------------------------------------------------
 */
extern void	idnxf_flushall_ecache();
extern int	idnxf_shmem_add(int is_master, boardset_t boardset,
			pfn_t pfnbase, pfn_t pfnlimit,
			uint_t *mcadr);
extern int	idnxf_shmem_sub(int is_master, boardset_t boardset);
extern int	idn_cpu_per_board(void *p2o, cpuset_t cset,
			struct hwconfig *hwp);
/*
 * ---------------------------------------------------------------------
 * io/idn_dlpi.c
 * ---------------------------------------------------------------------
 */
extern int	idndl_init(struct idn *sip);
extern void	idndl_uninit(struct idn *sip);
extern void	idndl_statinit(struct idn *sip);
extern void	idndl_dodetach(struct idnstr *);
extern int	idnioc_dlpi(queue_t *wq, mblk_t *mp, int *argsize);
extern void	idndl_localetheraddr(struct idn *sip, struct ether_addr *eap);
extern int	idndl_domain_etheraddr(int domid, int instance,
			struct ether_addr *eap);
extern void	idndl_dlpi_init();
extern int	idndl_start(queue_t *wq, mblk_t *mp, struct idn *sip);
extern void	idndl_read(struct idn *sip, mblk_t *mp);
extern void	idndl_proto(queue_t *wq, mblk_t *mp);
extern void	idndl_sendup(struct idn *, mblk_t *, struct idnstr *(*)());
extern struct idnstr	*idndl_accept(struct idnstr *, struct idn *, int,
				struct ether_addr *);
extern struct idnstr	*idndl_paccept(struct idnstr *, struct idn *, int,
				struct ether_addr *);
extern void	idndl_wenable(struct idn *);
/*
 * ---------------------------------------------------------------------
 * io/idn_smr.c
 * ---------------------------------------------------------------------
 */
extern void	smr_slabwaiter_open(domainset_t domset);
extern void	smr_slabwaiter_close(domainset_t domset);
/*
 * ---------------------------------------------------------------------
 */
extern void	idn_smrsize_init();
extern void	idn_init_autolink();
extern void	idn_deinit_autolink();

extern void	idn_dmv_handler(void *arg);
extern void	idnxf_init_mondo(uint64_t dmv_word0,
			uint64_t dmv_word1, uint64_t dmv_word2);
extern int	idnxf_send_mondo(int upaid);

extern clock_t	idn_msg_waittime[];
extern clock_t	idn_msg_retrytime[];

#endif /* !_ASM */
#endif /* _KERNEL */

#ifndef _ASM
/*
 * ---------------------------------------------------------------------
 */
#define	IDN_NIL_DOMID		-1
#define	IDN_NIL_DCPU		-1

/*
 * ---------------------------------------------------------------------
 */

/*
 * IOCTL Interface
 *
 * Commands must stay in the range (1 - 4096) since only 12 bits
 * are allotted.
 */
#define	_IDN(n)			(('I' << 20) | ('D' << 12) | (n))
#define	IDNIOC_LINK		_IDN(1)		/* domain_link */
#define	IDNIOC_UNLINK		_IDN(2)		/* domain_unlink */
#define	IDNIOC_unused0		_IDN(3)
#define	IDNIOC_unused1		_IDN(4)
#define	IDNIOC_unused2		_IDN(5)
#define	IDNIOC_unused3		_IDN(6)
#define	IDNIOC_unused4		_IDN(7)
#define	IDNIOC_DLPI_ON		_IDN(8)		/* Turn ON DLPI on str */
#define	IDNIOC_DLPI_OFF		_IDN(9)		/* Turn OFF DLPI on str */
#define	IDNIOC_PING		_IDN(10)	/* For latency testing */
#define	IDNIOC_PING_INIT	_IDN(11)
#define	IDNIOC_PING_DEINIT	_IDN(12)
#define	IDNIOC_MEM_RW		_IDN(13)	/* Random R/W of SMR */

#define	VALID_NDOP(op)		(((op) == ND_SET) || ((op) == ND_GET))

#define	VALID_DLPIOP(op)	(((op) == DLIOCRAW) || \
				((op) == DL_IOC_HDR_INFO))

#define	VALID_IDNOP(op)		(((op) >= _IDN(1)) && ((op) <= _IDN(13)))

#define	VALID_IDNIOCTL(op)	(VALID_IDNOP(op) || \
				VALID_NDOP(op) || \
				VALID_DLPIOP(op))

typedef union idnop {
	struct {
		int	domid;		/* input */
		int	cpuid;		/* input */
		int	master;		/* input */
		int	wait;		/* input */
	} link;
	struct {
		int	domid;		/* input */
		int	cpuid;		/* input */
		int	force;		/* input */
		int	wait;		/* input */
	} unlink;
	struct {
		int	domid;		/* input */
		int	cpuid;		/* input */
	} ping;
	struct {
		uint_t	lo_off;		/* input */
		uint_t	hi_off;		/* input */
		int	blksize;	/* input */
		int	num;		/* input */
		int	rw;		/* input */
		int	goawol;		/* input */
	} rwmem;
} idnop_t;
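
/*
 * Illustrative user-level sketch (hypothetical descriptor and values):
 * the IDN ioctls take an idnop_t, e.g. linking to a remote domain with
 * IDNIOC_LINK.  The field meanings follow their names; the particular
 * numbers below are made up.
 *
 *	idnop_t		idnop;
 *
 *	idnop.link.domid = 1;
 *	idnop.link.cpuid = 4;
 *	idnop.link.master = -1;
 *	idnop.link.wait = 30;
 *	if (ioctl(fd, IDNIOC_LINK, &idnop) == -1)
 *		...handle error...
 *
 * Since _IDN(n) places 'I' and 'D' in the upper bits, only the low
 * 12 bits carry the command number, which is why commands must stay
 * in the range noted above.
 */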

#ifdef _KERNEL
/*
 * ndd support for IDN tunables.
 */
typedef struct idnparam {
	ulong_t		sp_min;
	ulong_t		sp_max;
	ulong_t		sp_val;
	char		*sp_name;
} idnparam_t;

extern idnparam_t	idn_param_arr[];

#define	idn_modunloadable	idn_param_arr[0].sp_val
#ifdef IDN_PERF
#define	_LP			0
#define	_xxx_tbd		idn_param_arr[_LP+1].sp_val
#endif /* IDN_PERF */
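
/*
 * Illustrative sketch (the entry shown is hypothetical): each ndd
 * tunable is described by one idnparam_t giving its minimum, maximum,
 * current value and name, e.g.
 *
 *	idnparam_t	idn_param_arr[] = {
 *		{ 0, 1, 1, "idn_modunloadable" },
 *	};
 *
 * The idn_modunloadable macro above is then just an alias for the
 * sp_val field of entry 0; the actual table is defined in the driver
 * source.
 */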

/*
 * =====================================================================
 */

/*
 * Buffers used to pretty-print board lists and cpu lists in
 * log/console messages.  The length is big enough to display 64
 * double-digit cpus separated by a comma and a single space.  (A board
 * list is similar, but only 16 entries are possible.)
 */
#define	_DSTRLEN		400
#define	ALLOC_DISPSTRING()	((char *)kmem_alloc(_DSTRLEN, KM_NOSLEEP))
#define	FREE_DISPSTRING(b)	(kmem_free((void *)(b), _DSTRLEN))
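
/*
 * Illustrative sketch: the display-string helpers pair with the
 * *2str() routines declared earlier ("cset" is a hypothetical
 * cpuset_t):
 *
 *	char	*dbuf;
 *
 *	if ((dbuf = ALLOC_DISPSTRING()) != NULL) {
 *		cpuset2str(cset, dbuf);
 *		cmn_err(CE_NOTE, "cpus = %s", dbuf);
 *		FREE_DISPSTRING(dbuf);
 *	}
 *
 * ALLOC_DISPSTRING() uses KM_NOSLEEP, so the NULL check is required.
 */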

/*
 * These are declared in idn.c.
 */
extern const char	*idnds_str[];
extern const char	*idnxs_str[];
extern const char	*idngs_str[];
extern const char	*idncmd_str[];
extern const char	*idncon_str[];
extern const char	*idnfin_str[];
extern const char	*idnfinarg_str[];
extern const char	*idnfinopt_str[];
extern const char	*idnreg_str[];
extern const char	*idnnack_str[];
extern const char	*idnop_str[];
extern const char	*idnsync_str[];
extern const char	*chanop_str[];
extern const char	*chanaction_str[];
extern const char	*inum_str[];
extern const int	inum_bump;
extern const int	inum_max;
extern const int	acknack_shift;

extern const char	*timer_str[];
extern const char	*res_str[];

#endif /* _KERNEL */
#endif /* !_ASM */

#ifdef __cplusplus
}
#endif

#endif /* _SYS_IDN_H */