usr/src/uts/common/io/gldutil.c
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
21 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
22 * Use is subject to license terms.
23 * Copyright (c) 2016 by Delphix. All rights reserved.
26 #pragma ident "%Z%%M% %I% %E% SMI"
29 * gld - Generic LAN Driver
30 * media dependent routines
33 #include <sys/types.h>
34 #include <sys/errno.h>
35 #include <sys/stropts.h>
36 #include <sys/stream.h>
37 #include <sys/kmem.h>
38 #include <sys/stat.h>
39 #include <sys/modctl.h>
40 #include <sys/kstat.h>
41 #include <sys/debug.h>
43 #include <sys/byteorder.h>
44 #include <sys/strsun.h>
45 #include <sys/dlpi.h>
46 #include <sys/ethernet.h>
47 #include <sys/multidata.h>
48 #include <sys/gld.h>
49 #include <sys/gldpriv.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/sysmacros.h>
53 #include <sys/ib/clients/ibd/ibd.h>
54 #include <sys/pattr.h>
56 #define DLSAPLENGTH(macinfo) \
57 ((macinfo)->gldm_addrlen + ABS((macinfo)->gldm_saplen))
59 #ifdef GLD_DEBUG
60 extern int gld_debug;
61 #endif
63 extern void gld_bitrevcopy(caddr_t src, caddr_t target, size_t n);
64 extern char *gld_macaddr_sprintf(char *, unsigned char *, int);
65 extern gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
66 extern uint32_t gld_global_options;
68 static struct llc_snap_hdr llc_snap_def = {
69 LSAP_SNAP, /* DLSAP 0xaa */
70 LSAP_SNAP, /* SLSAP 0xaa */
71 CNTL_LLC_UI, /* Control 0x03 */
72 0x00, 0x00, 0x00, /* Org[3] */
73 0x00 /* Type */
76 #define ISETHERTYPE(snaphdr) \
77 (snaphdr->d_lsap == LSAP_SNAP && \
78 snaphdr->s_lsap == LSAP_SNAP && \
79 snaphdr->control == CNTL_LLC_UI && \
80 snaphdr->org[0] == 0 && \
81 snaphdr->org[1] == 0 && \
82 snaphdr->org[2] == 0)
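/*
 * Note: ISETHERTYPE() matches the RFC 1042 style of SNAP encapsulation:
 * an 802.2 LLC UI frame addressed to the SNAP SAP (0xaa) with a zero OUI,
 * in which case the two-byte SNAP type field carries an EtherType value.
 */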
84 /* ======== */
85 /* Ethernet */
86 /* ======== */
88 static mac_addr_t ether_broadcast = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
90 void
91 gld_init_ether(gld_mac_info_t *macinfo)
93 struct gldkstats *sp =
94 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->kstatp->ks_data;
96 /* Assumptions we make for this medium */
97 ASSERT(macinfo->gldm_type == DL_ETHER);
98 ASSERT(macinfo->gldm_addrlen == 6);
99 ASSERT(macinfo->gldm_saplen == -2);
100 #ifndef lint
101 ASSERT(sizeof (struct ether_header) == 14);
102 ASSERT(sizeof (mac_addr_t) == 6);
103 #endif
105 kstat_named_init(&sp->glds_frame, "align_errors", KSTAT_DATA_ULONG);
106 kstat_named_init(&sp->glds_crc, "fcs_errors", KSTAT_DATA_ULONG);
107 kstat_named_init(&sp->glds_collisions, "collisions", KSTAT_DATA_ULONG);
108 kstat_named_init(&sp->glds_nocarrier, "carrier_errors",
109 KSTAT_DATA_ULONG);
110 kstat_named_init(&sp->glds_defer, "defer_xmts", KSTAT_DATA_ULONG);
111 kstat_named_init(&sp->glds_xmtlatecoll, "tx_late_collisions",
112 KSTAT_DATA_ULONG);
113 kstat_named_init(&sp->glds_short, "runt_errors", KSTAT_DATA_ULONG);
114 kstat_named_init(&sp->glds_excoll, "ex_collisions", KSTAT_DATA_ULONG);
117 * only initialize the new statistics if the driver
118 * knows about them.
120 if (macinfo->gldm_driver_version != GLD_VERSION_200)
121 return;
123 kstat_named_init(&sp->glds_dot3_first_coll,
124 "first_collisions", KSTAT_DATA_UINT32);
125 kstat_named_init(&sp->glds_dot3_multi_coll,
126 "multi_collisions", KSTAT_DATA_UINT32);
127 kstat_named_init(&sp->glds_dot3_sqe_error,
128 "sqe_errors", KSTAT_DATA_UINT32);
129 kstat_named_init(&sp->glds_dot3_mac_xmt_error,
130 "macxmt_errors", KSTAT_DATA_UINT32);
131 kstat_named_init(&sp->glds_dot3_mac_rcv_error,
132 "macrcv_errors", KSTAT_DATA_UINT32);
133 kstat_named_init(&sp->glds_dot3_frame_too_long,
134 "toolong_errors", KSTAT_DATA_UINT32);
135 kstat_named_init(&sp->glds_duplex, "duplex", KSTAT_DATA_CHAR);
138 /*ARGSUSED*/
139 void
140 gld_uninit_ether(gld_mac_info_t *macinfo)
145 gld_interpret_ether(gld_mac_info_t *macinfo, mblk_t *mp, pktinfo_t *pktinfo,
146 packet_flag_t flags)
148 struct ether_header *mh;
149 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
150 struct llc_snap_hdr *snaphdr;
151 mblk_t *pmp = NULL, *savemp = mp;
152 unsigned short typelen;
153 int ret = 0;
156 * Quickly handle receive fastpath for IPQ hack.
158 if (flags == GLD_RXQUICK) {
159 pktinfo->pktLen = msgdsize(mp);
161 * Check whether the header is contiguous, which
162 * also implicitly makes sure the packet is big enough.
164 if (MBLKL(mp) < sizeof (struct ether_header))
165 return (-1);
166 mh = (struct ether_header *)mp->b_rptr;
167 pktinfo->ethertype = REF_NET_USHORT(mh->ether_type);
168 pktinfo->isForMe = mac_eq(&mh->ether_dhost,
169 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
170 pktinfo->macLen = sizeof (struct ether_header);
172 return (0);
175 bzero((void *)pktinfo, sizeof (*pktinfo));
177 pktinfo->pktLen = msgdsize(mp);
179 /* make sure packet has at least a whole mac header */
180 if (pktinfo->pktLen < sizeof (struct ether_header))
181 return (-1);
183 /* make sure the mac header falls into contiguous memory */
184 if (MBLKL(mp) < sizeof (struct ether_header)) {
185 if ((pmp = msgpullup(mp, -1)) == NULL) {
186 #ifdef GLD_DEBUG
187 if (gld_debug & GLDERRS)
188 cmn_err(CE_WARN,
189 "GLD: interpret_ether cannot msgpullup");
190 #endif
191 return (-1);
193 mp = pmp; /* this mblk contains the whole mac header */
196 mh = (struct ether_header *)mp->b_rptr;
198 /* Check to see if the mac is a broadcast or multicast address. */
199 if (mac_eq(&mh->ether_dhost, ether_broadcast, macinfo->gldm_addrlen))
200 pktinfo->isBroadcast = 1;
201 else if (mh->ether_dhost.ether_addr_octet[0] & 1)
202 pktinfo->isMulticast = 1;
204 typelen = REF_NET_USHORT(mh->ether_type);
206	 * If the hardware is capable of VLAN tag insertion,
207	 * strip out the VLAN tag info. That the hardware is
208	 * capable of VLAN tagging is established by the presence
209	 * of a non-NULL 'macinfo->gldm_send_tagged'.
211 if (flags == GLD_TX) {
212 if ((typelen == ETHERTYPE_VLAN) &&
213 (macinfo->gldm_send_tagged != NULL)) {
214 struct ether_vlan_header *evhp;
215 uint16_t tci;
217 if ((MBLKL(mp) < sizeof (struct ether_vlan_header)) &&
218 (pullupmsg(mp, sizeof (struct ether_vlan_header))
219 == 0)) {
220 ret = -1;
221 goto out;
223 evhp = (struct ether_vlan_header *)mp->b_rptr;
224 tci = REF_NET_USHORT(evhp->ether_tci);
227	 * We don't allow the VID and priority to both be zero.
229 if ((GLD_VTAG_PRI((int32_t)tci) == 0 &&
230 GLD_VTAG_VID((int32_t)tci) == VLAN_VID_NONE) ||
231 (GLD_VTAG_CFI((uint32_t)tci)) != VLAN_CFI_ETHER) {
232 ret = -1;
233 goto out;
237	 * Remember the VTAG info in order to reinsert it,
238	 * then strip the tag. This is required because some
239	 * drivers do not allow the size of the message (passed
240	 * to the gldm_send_tagged() function) to be greater
241 * than ETHERMAX.
243 GLD_SAVE_MBLK_VTAG(savemp, GLD_TCI2VTAG(tci));
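	/*
	 * The ovbcopy() below slides the destination and source
	 * addresses (2 * ETHERADDRL bytes) forward by VTAG_SIZE,
	 * overwriting the TPID/TCI; advancing b_rptr past the
	 * now-stale leading bytes then leaves an untagged
	 * Ethernet header in place.
	 */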
244 ovbcopy(mp->b_rptr, mp->b_rptr + VTAG_SIZE,
245 2 * ETHERADDRL);
246 mp->b_rptr += VTAG_SIZE;
248 goto out; /* Got all info we need for xmit case */
251 ASSERT(GLDM_LOCK_HELD(macinfo));
254 * Deal with the mac header
257 mac_copy(&mh->ether_dhost, pktinfo->dhost, macinfo->gldm_addrlen);
258 mac_copy(&mh->ether_shost, pktinfo->shost, macinfo->gldm_addrlen);
260 pktinfo->isLooped = mac_eq(pktinfo->shost,
261 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
262 pktinfo->isForMe = mac_eq(pktinfo->dhost,
263 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
265 pktinfo->macLen = sizeof (struct ether_header);
267 if (typelen > ETHERMTU) {
268 pktinfo->ethertype = typelen; /* use type interpretation */
269 goto out;
273 * Packet is 802.3 so the ether type/length field
274 * specifies the number of bytes that should be present
275 * in the data field. Additional bytes are padding, and
276 * should be removed
279 int delta = pktinfo->pktLen -
280 (sizeof (struct ether_header) + typelen);
282 if (delta > 0 && adjmsg(mp, -delta))
283 pktinfo->pktLen -= delta;
287 * Before trying to look beyond the MAC header, make sure the LLC
288 * header exists, and that both it and any SNAP header are contiguous.
290 if (pktinfo->pktLen < pktinfo->macLen + LLC_HDR1_LEN)
291 goto out; /* LLC hdr should have been there! */
293 pktinfo->isLLC = 1;
295 if (gld_global_options & GLD_OPT_NO_ETHRXSNAP ||
296 pktinfo->pktLen < pktinfo->macLen + LLC_SNAP_HDR_LEN)
297 goto out;
299 if (MBLKL(mp) < sizeof (struct ether_header) + LLC_SNAP_HDR_LEN &&
300 MBLKL(mp) < pktinfo->pktLen) {
302 * we don't have the entire packet within the first mblk (and
303 * therefore we didn't do the msgpullup above), AND the first
304 * mblk may not contain all the data we need to look at.
306 ASSERT(pmp == NULL); /* couldn't have done msgpullup above */
307 if ((pmp = msgpullup(mp, -1)) == NULL) {
308 #ifdef GLD_DEBUG
309 if (gld_debug & GLDERRS)
310 cmn_err(CE_WARN,
311 "GLD: interpret_ether cannot msgpullup2");
312 #endif
313 goto out; /* can't interpret this pkt further */
315 mp = pmp; /* this mblk should contain everything needed */
319 * Check SAP/SNAP information for EtherType.
322 snaphdr = (struct llc_snap_hdr *)(mp->b_rptr + pktinfo->macLen);
323 if (ISETHERTYPE(snaphdr)) {
324 pktinfo->ethertype = REF_NET_USHORT(snaphdr->type);
325 pktinfo->hdrLen = LLC_SNAP_HDR_LEN;
327 out:
328 if (pmp != NULL)
329 freemsg(pmp);
331 return (ret);
334 mblk_t *
335 gld_unitdata_ether(gld_t *gld, mblk_t *mp)
337 gld_mac_info_t *macinfo = gld->gld_mac_info;
338 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
339 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
340 mac_addr_t dhost;
341 unsigned short typelen;
342 mblk_t *nmp;
343 struct ether_header *mh;
344 int hdrlen;
345 uint32_t vptag;
346 gld_vlan_t *gld_vlan;
348 ASSERT(macinfo);
350 /* extract needed info from the mblk before we maybe reuse it */
351 mac_copy(gldp->glda_addr, dhost, macinfo->gldm_addrlen);
353 /* look in the unitdata request for a sap, else use bound one */
354 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
355 REF_HOST_USHORT(gldp->glda_sap) != 0)
356 typelen = REF_HOST_USHORT(gldp->glda_sap);
357 else
358 typelen = gld->gld_sap;
361 * We take values less than or equal to ETHERMTU to mean that the
362 * packet should not have an encoded EtherType and so we use the
363 * IEEE 802.3 length interpretation of the type/length field.
365 if (typelen <= ETHERMTU)
366 typelen = msgdsize(mp);
368 hdrlen = sizeof (struct ether_header);
371	 * Check to see if VLAN is enabled on this stream;
372	 * if so, make the header bigger to hold the
373	 * stream's vlan tag.
375 gld_vlan = (gld_vlan_t *)gld->gld_vlan;
376 if (gld_vlan && (gld_vlan->gldv_id != VLAN_VID_NONE)) {
377 hdrlen += VTAG_SIZE;
378 vptag = gld_vlan->gldv_ptag;
381 /* need a buffer big enough for the headers */
382 nmp = mp->b_cont; /* where the packet payload M_DATA is */
383 if (DB_REF(nmp) == 1 && MBLKHEAD(nmp) >= hdrlen) {
384 /* it fits at the beginning of the first M_DATA block */
385 freeb(mp); /* don't need the M_PROTO anymore */
386 } else if (DB_REF(mp) == 1 && MBLKSIZE(mp) >= hdrlen) {
387 /* we can reuse the dl_unitdata_req M_PROTO mblk */
388 nmp = mp;
389 DB_TYPE(nmp) = M_DATA;
390 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
391 } else {
392 /* we need to allocate one */
393 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
394 return (NULL);
395 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
396 linkb(nmp, mp->b_cont);
397 freeb(mp);
400 /* Got the space, now copy in the header components */
402 nmp->b_rptr -= sizeof (typelen);
403 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, typelen);
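	/*
	 * The header is built backwards from the payload: type/length
	 * first, then (for a VLAN stream) the 4-byte tag, then the
	 * addresses. vptag (gldv_ptag) is assumed to carry the TPID in
	 * its upper 16 bits and the TCI in its lower 16 bits, which is
	 * consistent with how it is written below as two network-order
	 * ushorts, low half (TCI) first.
	 */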
404 if (hdrlen > sizeof (struct ether_header)) {
405 nmp->b_rptr -= sizeof (uint16_t);
406 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, vptag);
407 vptag >>= 16;
408 nmp->b_rptr -= sizeof (uint16_t);
409 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, vptag);
411 nmp->b_rptr -= (ETHERADDRL * 2);
412 mh = (struct ether_header *)nmp->b_rptr;
413 mac_copy(dhost, &mh->ether_dhost, macinfo->gldm_addrlen);
416 * We access the mac address without the mutex to prevent
417 * mutex contention (BUG 4211361)
419 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
420 &mh->ether_shost, macinfo->gldm_addrlen);
422 return (nmp);
426	 * Insert the VLAN tag into the packet. On entry the packet carries a
427	 * plain Ethernet header with no VLAN tag information.
429 mblk_t *
430 gld_insert_vtag_ether(mblk_t *mp, uint32_t vtag)
432 struct ether_vlan_header *evhp;
433 struct ether_header *ehp;
434 mblk_t *nmp;
436 if (vtag == VLAN_VID_NONE)
437 return (mp);
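	/*
	 * The 32-bit vtag is taken to carry the TPID in its upper 16
	 * bits and the TCI in its lower 16 bits; it is written into the
	 * ether_vlan_header below as two network-order ushorts.
	 */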
439 if (DB_REF(mp) == 1 && MBLKHEAD(mp) >= VTAG_SIZE) {
440 /* it fits at the beginning of the message block */
441 nmp = mp;
442 ovbcopy(nmp->b_rptr, nmp->b_rptr - VTAG_SIZE, 2 * ETHERADDRL);
443 nmp->b_rptr -= VTAG_SIZE;
444 evhp = (struct ether_vlan_header *)nmp->b_rptr;
445 } else {
446 /* we need to allocate one */
447 if ((nmp = allocb(sizeof (struct ether_vlan_header),
448 BPRI_MED)) == NULL) {
449 return (NULL);
451 nmp->b_wptr += sizeof (struct ether_vlan_header);
453 /* transfer the ether_header fields */
454 evhp = (struct ether_vlan_header *)nmp->b_rptr;
455 ehp = (struct ether_header *)mp->b_rptr;
456 mac_copy(&ehp->ether_dhost, &evhp->ether_dhost, ETHERADDRL);
457 mac_copy(&ehp->ether_shost, &evhp->ether_shost, ETHERADDRL);
458 bcopy(&ehp->ether_type, &evhp->ether_type, sizeof (uint16_t));
460	 /* advance mp past the MAC header. */
461 mp->b_rptr += sizeof (struct ether_header);
462 if (MBLKL(mp) == 0) {
463 nmp->b_cont = mp->b_cont;
464 freeb(mp);
465 } else {
466 nmp->b_cont = mp;
470 SET_NET_USHORT(evhp->ether_tci, vtag);
471 vtag >>= 16;
472 SET_NET_USHORT(evhp->ether_tpid, vtag);
473 return (nmp);
476 mblk_t *
477 gld_fastpath_ether(gld_t *gld, mblk_t *mp)
479 gld_mac_info_t *macinfo = gld->gld_mac_info;
480 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
481 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
482 unsigned short typelen;
483 mblk_t *nmp;
484 struct ether_header *mh;
485 int hdrlen;
486 uint32_t vptag;
487 gld_vlan_t *gld_vlan;
489 ASSERT(macinfo);
491 /* look in the unitdata request for a sap, else use bound one */
492 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
493 REF_HOST_USHORT(gldp->glda_sap) != 0)
494 typelen = REF_HOST_USHORT(gldp->glda_sap);
495 else
496 typelen = gld->gld_sap;
499 * We only do fast-path for EtherType encoding because this is the only
500 * case where the media header will be consistent from packet to packet.
502 if (typelen <= ETHERMTU)
503 return (NULL);
506 * Initialize the fast path header to include the
507 * basic source address information and type field.
509 hdrlen = sizeof (struct ether_header);
512	 * Check to see if VLAN is enabled on this stream;
513	 * if so, make the header bigger to hold the
514	 * stream's vlan tag.
516 gld_vlan = (gld_vlan_t *)gld->gld_vlan;
517 if (gld_vlan && (gld_vlan->gldv_id != VLAN_VID_NONE)) {
518 hdrlen += VTAG_SIZE;
519 vptag = gld_vlan->gldv_ptag;
522 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
523 return (NULL);
525 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
527 /* Got the space, now copy in the header components */
529 nmp->b_rptr -= sizeof (typelen);
530 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, typelen);
533	 * If the header is for a VLAN stream, then add
534	 * the VLAN tag to the fast-path header.
536 if (hdrlen > sizeof (struct ether_header)) {
537 nmp->b_rptr -= sizeof (uint16_t);
538 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, vptag);
539 vptag >>= 16;
540 nmp->b_rptr -= sizeof (uint16_t);
541 SET_NET_USHORT(*(uint16_t *)nmp->b_rptr, vptag);
543 nmp->b_rptr -= (ETHERADDRL * 2);
544 mh = (struct ether_header *)nmp->b_rptr;
545 mac_copy(gldp->glda_addr, &mh->ether_dhost, macinfo->gldm_addrlen);
547 GLDM_LOCK(macinfo, RW_WRITER);
548 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
549 &mh->ether_shost, macinfo->gldm_addrlen);
550 GLDM_UNLOCK(macinfo);
552 return (nmp);
555 /* == */
556 /* IB */
557 /* == */
559 void
560 gld_init_ib(gld_mac_info_t *macinfo)
563	 * Currently, the generic stats maintained by GLD are
564	 * sufficient for IPoIB.
567 /* Assumptions we make for this medium */
568 ASSERT(macinfo->gldm_type == DL_IB);
569 ASSERT(macinfo->gldm_addrlen == IPOIB_ADDRL);
570 ASSERT(macinfo->gldm_saplen == -2);
573 /* ARGSUSED */
574 void
575 gld_uninit_ib(gld_mac_info_t *macinfo)
580 * The packet format sent to the driver is:
581 * IPOIB_ADDRL bytes dest addr :: 2b sap :: 2b 0s :: data
582 * The packet format received from the driver is:
583 * IPOIB_GRH_SIZE bytes pseudo GRH :: 2b sap :: 2b 0s :: data.
586 gld_interpret_ib(gld_mac_info_t *macinfo, mblk_t *mp, pktinfo_t *pktinfo,
587 packet_flag_t flags)
589 ipoib_pgrh_t *grh;
590 ipoib_ptxhdr_t *gldp;
591 mblk_t *pmp = NULL;
592 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
595 * Quickly handle receive fastpath for IPQ hack.
597 if (flags == GLD_RXQUICK) {
598 pktinfo->pktLen = msgdsize(mp) - IPOIB_GRH_SIZE;
601 * Check whether the header is contiguous, which
602 * also implicitly makes sure the packet is big enough.
604 if (MBLKL(mp) < (IPOIB_GRH_SIZE + IPOIB_HDRSIZE))
605 return (-1);
608	 * In almost all cases, unicast will not have
609	 * a valid pgrh; quickly identify that case and
610	 * ask for the IPQ hack optimization only then.
612 grh = (ipoib_pgrh_t *)mp->b_rptr;
613 if (grh->ipoib_vertcflow == 0) {
614 struct ipoib_header *ihp = (struct ipoib_header *)
615 (mp->b_rptr + IPOIB_GRH_SIZE);
617 pktinfo->isForMe = 1;
618 pktinfo->ethertype = REF_NET_USHORT(ihp->ipoib_type);
619 pktinfo->macLen = IPOIB_GRH_SIZE + IPOIB_HDRSIZE;
620 return (0);
621 } else {
622 return (-1);
627 * Handle the GLD_TX, GLD_RX, GLD_RXLOOP cases now.
629 ASSERT(flags != GLD_RXQUICK);
630 bzero((void *)pktinfo, sizeof (*pktinfo));
632 if (flags != GLD_RX) {
634 * GLD_TX and GLD_RXLOOP cases.
636 gldp = (ipoib_ptxhdr_t *)mp->b_rptr;
637 pktinfo->pktLen = msgdsize(mp);
639 /* make sure packet has at least a pseudo header */
640 if (pktinfo->pktLen < sizeof (ipoib_ptxhdr_t))
641 return (-1);
643 /* make sure the mac header falls into contiguous memory */
644 if (MBLKL(mp) < sizeof (ipoib_ptxhdr_t)) {
645 if ((pmp = msgpullup(mp, -1)) == NULL) {
646 #ifdef GLD_DEBUG
647 if (gld_debug & GLDERRS)
648 cmn_err(CE_WARN,
649 "GLD: interpret_ib "
650 "cannot msgpullup");
651 #endif
652 return (-1);
654 /* this mblk contains the whole mac header */
655 mp = pmp;
659	 * Check whether the mac is a broadcast or multicast address;
660	 * both types of address have 0x00FFFFFF in the top 4 bytes.
662 if (mac_eq(&gldp->ipoib_dest, macinfo->gldm_broadcast_addr,
663 sizeof (uint32_t))) {
664 if (mac_eq(&gldp->ipoib_dest,
665 macinfo->gldm_broadcast_addr, IPOIB_ADDRL))
666 pktinfo->isBroadcast = 1;
667 else
668 pktinfo->isMulticast = 1;
672 * Only count bytes we will be sending over the wire
673 * or looping back.
675 pktinfo->pktLen -= IPOIB_ADDRL;
676 if (flags == GLD_TX)
677 goto out; /* Got all info we need for xmit case */
680 * Loopback case: this is a dup'ed message.
682 mp->b_rptr += IPOIB_ADDRL;
683 mac_copy(&gldp->ipoib_dest, pktinfo->dhost, IPOIB_ADDRL);
684 mac_copy(mac_pvt->curr_macaddr, pktinfo->shost, IPOIB_ADDRL);
685 } else {
687 * GLD_RX case; process packet sent from driver.
689 ipoib_mac_t *mact, *tact;
690 ib_qpn_t dqpn;
692 pktinfo->pktLen = msgdsize(mp);
693 /* make sure packet has at least pgrh and mac header */
694 if (pktinfo->pktLen < (IPOIB_GRH_SIZE + IPOIB_HDRSIZE))
695 return (-1);
697 /* make sure the header falls into contiguous memory */
698 if (MBLKL(mp) < (IPOIB_GRH_SIZE + IPOIB_HDRSIZE)) {
699 if ((pmp = msgpullup(mp, -1)) == NULL) {
700 #ifdef GLD_DEBUG
701 if (gld_debug & GLDERRS)
702 cmn_err(CE_WARN,
703 "GLD: interpret_ib "
704 "cannot msgpullup2");
705 #endif
706 return (-1);
708 /* this mblk contains the whole mac header */
709 mp = pmp;
712 grh = (ipoib_pgrh_t *)mp->b_rptr;
713 mp->b_rptr += IPOIB_GRH_SIZE;
714 pktinfo->pktLen -= IPOIB_GRH_SIZE;
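		/*
		 * A non-zero version/tclass/flow-label word means the
		 * driver supplied a valid pseudo GRH, from which the
		 * source and destination can be recovered. An IBA
		 * multicast GID begins with 0xFF, and the QPN portion
		 * of a multicast IPoIB address is the well-known IBA
		 * multicast QPN (IB_MC_QPN); any unicast packet that
		 * reaches us here is by definition for this node.
		 */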
715 if (grh->ipoib_vertcflow) {
717 * First, copy source address from grh.
719 mact = (ipoib_mac_t *)pktinfo->shost;
720 mac_copy(&grh->ipoib_sqpn, &mact->ipoib_qpn,
721 IPOIB_ADDRL);
724 * Then copy destination address from grh;
725 * first, the 16 bytes of GID.
727 mact = (ipoib_mac_t *)pktinfo->dhost;
728 mac_copy(&grh->ipoib_dgid_pref,
729 &mact->ipoib_gidpref, IPOIB_ADDRL -
730 sizeof (mact->ipoib_qpn));
731 tact = (ipoib_mac_t *)mac_pvt->curr_macaddr;
733	 /* Is this a multicast address? */
734 if (*(uchar_t *)(grh->ipoib_dgid_pref) == 0xFF) {
736	 * Only check for hardware looping in the
737	 * multicast case. It is assumed that higher
738	 * layer code (IP) will stop unicast loops,
739	 * i.e., will prevent a transmit to self.
741 if (bcmp(&grh->ipoib_sqpn, tact,
742 IPOIB_ADDRL) == 0)
743 pktinfo->isLooped = 1;
745 tact = (ipoib_mac_t *)macinfo->
746 gldm_broadcast_addr;
747 if (mac_eq(tact->ipoib_gidpref,
748 grh->ipoib_dgid_pref,
749 IPOIB_ADDRL - sizeof (tact->ipoib_qpn)))
750 pktinfo->isBroadcast = 1;
751 else
752 pktinfo->isMulticast = 1;
754 * Now copy the 4 bytes QPN part of the
755 * destination address.
757 dqpn = htonl(IB_MC_QPN);
758 mac_copy(&dqpn, &mact->ipoib_qpn,
759 sizeof (mact->ipoib_qpn));
760 } else {
762 * Now copy the 4 bytes QPN part of the
763 * destination address.
765 mac_copy(&tact->ipoib_qpn, &mact->ipoib_qpn,
766 sizeof (mact->ipoib_qpn));
768 * Any unicast packets received on IBA are
769 * for the node.
771 pktinfo->isForMe = 1;
773 } else {
775	 * It cannot be an IBA multicast packet.
776	 * It must have been unicast to us. We do not
777	 * have shost information, which is used in
778	 * gld_addudind(); IP/ARP does not care.
780 pktinfo->nosource = 1;
781 mac_copy(mac_pvt->curr_macaddr, pktinfo->dhost,
782 IPOIB_ADDRL);
784 * Any unicast packets received on IBA are
785 * for the node.
787 pktinfo->isForMe = 1;
791 ASSERT((flags == GLD_RX) || (flags == GLD_RXLOOP));
792 ASSERT(GLDM_LOCK_HELD(macinfo));
793 pktinfo->ethertype = REF_NET_USHORT(((ipoib_hdr_t *)
794 (mp->b_rptr))->ipoib_type);
795 pktinfo->macLen = IPOIB_HDRSIZE;
797 out:
798 if (pmp != NULL)
799 freemsg(pmp);
801 return (0);
805 * The packet format sent to the driver is: 2b sap :: 2b 0s :: data
807 void
808 gld_interpret_mdt_ib(gld_mac_info_t *macinfo, mblk_t *mp, pdescinfo_t *pinfo,
809 pktinfo_t *pktinfo, mdt_packet_flag_t flags)
811 gld_mac_pvt_t *mac_pvt;
812 multidata_t *dlmdp;
813 pattrinfo_t attr_info = { PATTR_DSTADDRSAP, };
814 pattr_t *patr;
815 ipoib_ptxhdr_t *dlap = NULL;
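	/*
	 * Three cases are handled here: GLD_MDT_TXPKT is per-packet
	 * work (prepend the IPOIB_HDRSIZE link header to this
	 * descriptor and total up the bytes that will go on the wire),
	 * while GLD_MDT_TX and GLD_MDT_RXLOOP are per-message work
	 * that recovers the destination address and SAP from the
	 * multidata's PATTR_DSTADDRSAP attribute.
	 */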
818 * Per packet formatting.
820 if (flags == GLD_MDT_TXPKT) {
821 ipoib_hdr_t *hptr;
822 uint_t seg;
824 if (PDESC_HDRL(pinfo) == 0)
825 return;
828 * Update packet's link header.
830 pinfo->hdr_rptr -= IPOIB_HDRSIZE;
831 hptr = (ipoib_hdr_t *)pinfo->hdr_rptr;
832 hptr->ipoib_mbz = htons(0);
833 hptr->ipoib_type = pktinfo->ethertype;
836 * Total #bytes that will be put on wire.
838 pktinfo->pktLen = PDESC_HDRL(pinfo);
839 for (seg = 0; seg < pinfo->pld_cnt; seg++)
840 pktinfo->pktLen += PDESC_PLDL(pinfo, seg);
842 return;
846	 * The following two cases, GLD_MDT_TX and GLD_MDT_RXLOOP, are
847	 * per-MDT-message processing.
849 dlmdp = mmd_getmultidata(mp);
850 patr = mmd_getpattr(dlmdp, NULL, &attr_info);
851 ASSERT(patr != NULL);
852 ASSERT(macinfo->gldm_saplen == -2);
853 if (patr != NULL)
854 dlap = (ipoib_ptxhdr_t *)((pattr_addr_t *)attr_info.buf)->addr;
856 if (flags == GLD_MDT_TX) {
857 bzero((void *)pktinfo, sizeof (*pktinfo));
858 if (dlap == NULL)
859 return;
862	 * Check whether the mac is a broadcast or multicast address;
863	 * both types of address have 0x00FFFFFF in the top 4 bytes.
865 if (mac_eq(dlap, macinfo->gldm_broadcast_addr,
866 sizeof (uint32_t))) {
867 if (mac_eq(dlap, macinfo->gldm_broadcast_addr,
868 IPOIB_ADDRL))
869 pktinfo->isBroadcast = 1;
870 else
871 pktinfo->isMulticast = 1;
873 pktinfo->ethertype = REF_NET_USHORT(dlap->
874 ipoib_rhdr.ipoib_type);
875 } else {
876 ASSERT(flags == GLD_MDT_RXLOOP);
877 pktinfo->macLen = IPOIB_HDRSIZE;
878 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
879 mac_copy(mac_pvt->curr_macaddr, pktinfo->shost, IPOIB_ADDRL);
880 if (dlap == NULL)
881 return;
882 mac_copy(&dlap->ipoib_dest, pktinfo->dhost, IPOIB_ADDRL);
886 mblk_t *
887 gld_unitdata_ib(gld_t *gld, mblk_t *mp)
889 gld_mac_info_t *macinfo = gld->gld_mac_info;
890 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
891 ipoib_ptxhdr_t *gldp = IPOIBDLSAP(dlp, dlp->dl_dest_addr_offset);
892 ipoib_mac_t dhost;
893 unsigned short type;
894 mblk_t *nmp;
895 int hdrlen;
897 ASSERT(macinfo != NULL);
899 /* extract needed info from the mblk before we maybe reuse it */
900 mac_copy(&gldp->ipoib_dest, &dhost, IPOIB_ADDRL);
902 /* look in the unitdata request for a sap, else use bound one */
903 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
904 REF_HOST_USHORT(gldp->ipoib_rhdr.ipoib_type) != 0)
905 type = REF_HOST_USHORT(gldp->ipoib_rhdr.ipoib_type);
906 else
907 type = gld->gld_sap;
909 hdrlen = sizeof (ipoib_ptxhdr_t);
911 /* need a buffer big enough for the headers */
912 nmp = mp->b_cont; /* where the packet payload M_DATA is */
913 if (DB_REF(nmp) == 1 && MBLKHEAD(nmp) >= hdrlen) {
914 /* it fits at the beginning of the first M_DATA block */
915 freeb(mp); /* don't need the M_PROTO anymore */
916 } else if (DB_REF(mp) == 1 && MBLKSIZE(mp) >= hdrlen) {
917 /* we can reuse the dl_unitdata_req M_PROTO mblk */
918 nmp = mp;
919 DB_TYPE(nmp) = M_DATA;
920 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
921 } else {
922 /* we need to allocate one */
923 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
924 return (NULL);
925 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
926 linkb(nmp, mp->b_cont);
927 freeb(mp);
930 /* Got the space, now copy in the header components */
932 nmp->b_rptr -= sizeof (ipoib_ptxhdr_t);
933 gldp = (ipoib_ptxhdr_t *)nmp->b_rptr;
934 SET_NET_USHORT(gldp->ipoib_rhdr.ipoib_type, type);
935 gldp->ipoib_rhdr.ipoib_mbz = 0;
936 mac_copy(&dhost, &gldp->ipoib_dest, IPOIB_ADDRL);
938 return (nmp);
941 mblk_t *
942 gld_fastpath_ib(gld_t *gld, mblk_t *mp)
944 gld_mac_info_t *macinfo = gld->gld_mac_info;
945 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
946 ipoib_ptxhdr_t *gldp = IPOIBDLSAP(dlp, dlp->dl_dest_addr_offset);
947 unsigned short type;
948 mblk_t *nmp;
949 ipoib_ptxhdr_t *tgldp;
950 int hdrlen;
952 ASSERT(macinfo != NULL);
954 /* look in the unitdata request for a sap, else use bound one */
955 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
956 REF_HOST_USHORT(gldp->ipoib_rhdr.ipoib_type) != 0)
957 type = REF_HOST_USHORT(gldp->ipoib_rhdr.ipoib_type);
958 else
959 type = gld->gld_sap;
961 hdrlen = sizeof (ipoib_ptxhdr_t);
963 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
964 return (NULL);
966 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
968 /* Got the space, now copy in the header components */
970 nmp->b_rptr -= sizeof (ipoib_ptxhdr_t);
971 tgldp = (ipoib_ptxhdr_t *)nmp->b_rptr;
972 tgldp->ipoib_rhdr.ipoib_type = htons(type);
973 tgldp->ipoib_rhdr.ipoib_mbz = 0;
974 mac_copy(&gldp->ipoib_dest, &tgldp->ipoib_dest, IPOIB_ADDRL);
976 return (nmp);
979 /* ==== */
980 /* FDDI */
981 /* ==== */
983 void
984 gld_init_fddi(gld_mac_info_t *macinfo)
986 struct gldkstats *sp =
987 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->kstatp->ks_data;
989 /* Assumptions we make for this medium */
990 ASSERT(macinfo->gldm_type == DL_FDDI);
991 ASSERT(macinfo->gldm_addrlen == 6);
992 ASSERT(macinfo->gldm_saplen == -2);
993 #ifndef lint
994 ASSERT(sizeof (struct fddi_mac_frm) == 13);
995 ASSERT(sizeof (mac_addr_t) == 6);
996 #endif
998 /* Wire address format is bit reversed from canonical format */
999 macinfo->gldm_options |= GLDOPT_CANONICAL_ADDR;
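	/*
	 * FDDI carries addresses on the wire in bit-reversed
	 * (non-canonical) order, so GLDOPT_CANONICAL_ADDR tells GLD to
	 * convert between the canonical form seen by DLPI clients and
	 * wire order; that is why the FDDI code below uses cmac_copy()
	 * (a copy that bit-reverses when this option is set) where the
	 * Ethernet code uses plain mac_copy().
	 */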
1001 kstat_named_init(&sp->glds_fddi_mac_error,
1002 "mac_errors", KSTAT_DATA_UINT32);
1003 kstat_named_init(&sp->glds_fddi_mac_lost,
1004 "mac_lost_errors", KSTAT_DATA_UINT32);
1005 kstat_named_init(&sp->glds_fddi_mac_token,
1006 "mac_tokens", KSTAT_DATA_UINT32);
1007 kstat_named_init(&sp->glds_fddi_mac_tvx_expired,
1008 "mac_tvx_expired", KSTAT_DATA_UINT32);
1009 kstat_named_init(&sp->glds_fddi_mac_late,
1010 "mac_late", KSTAT_DATA_UINT32);
1011 kstat_named_init(&sp->glds_fddi_mac_ring_op,
1012 "mac_ring_ops", KSTAT_DATA_UINT32);
1015 /*ARGSUSED*/
1016 void
1017 gld_uninit_fddi(gld_mac_info_t *macinfo)
1022 gld_interpret_fddi(gld_mac_info_t *macinfo, mblk_t *mp, pktinfo_t *pktinfo,
1023 packet_flag_t flags)
1025 struct fddi_mac_frm *mh;
1026 gld_mac_pvt_t *mac_pvt;
1027 struct llc_snap_hdr *snaphdr;
1028 mblk_t *pmp = NULL;
1031 * Quickly handle receive fastpath; FDDI does not support IPQ hack.
1033 if (flags == GLD_RXQUICK) {
1034 pktinfo->pktLen = msgdsize(mp);
1035 return (-1);
1038 bzero((void *)pktinfo, sizeof (*pktinfo));
1040 pktinfo->pktLen = msgdsize(mp);
1042 /* make sure packet has at least a whole mac header */
1043 if (pktinfo->pktLen < sizeof (struct fddi_mac_frm))
1044 return (-1);
1046 /* make sure the mac header falls into contiguous memory */
1047 if (MBLKL(mp) < sizeof (struct fddi_mac_frm)) {
1048 if ((pmp = msgpullup(mp, -1)) == NULL) {
1049 #ifdef GLD_DEBUG
1050 if (gld_debug & GLDERRS)
1051 cmn_err(CE_WARN,
1052 "GLD: interpret_fddi cannot msgpullup");
1053 #endif
1054 return (-1);
1056 mp = pmp; /* this mblk contains the whole mac header */
1059 mh = (struct fddi_mac_frm *)mp->b_rptr;
1061 /* Check to see if the mac is a broadcast or multicast address. */
1062 /* NB we are still in wire format (non canonical) */
1063 /* mac_eq works because ether_broadcast is the same either way */
1064 if (mac_eq(mh->fddi_dhost, ether_broadcast, macinfo->gldm_addrlen))
1065 pktinfo->isBroadcast = 1;
1066 else if (mh->fddi_dhost[0] & 0x80)
1067 pktinfo->isMulticast = 1;
1069 if (flags == GLD_TX)
1070 goto out; /* Got all info we need for xmit case */
1072 ASSERT(GLDM_LOCK_HELD(macinfo));
1075 * Deal with the mac header
1078 cmac_copy(mh->fddi_dhost, pktinfo->dhost,
1079 macinfo->gldm_addrlen, macinfo);
1080 cmac_copy(mh->fddi_shost, pktinfo->shost,
1081 macinfo->gldm_addrlen, macinfo);
1083 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1084 pktinfo->isLooped = mac_eq(pktinfo->shost,
1085 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
1086 pktinfo->isForMe = mac_eq(pktinfo->dhost,
1087 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
1089 pktinfo->macLen = sizeof (struct fddi_mac_frm);
1092 * Before trying to look beyond the MAC header, make sure the LLC
1093 * header exists, and that both it and any SNAP header are contiguous.
1095 if (MBLKL(mp) < sizeof (struct fddi_mac_frm) + LLC_SNAP_HDR_LEN &&
1096 MBLKL(mp) < pktinfo->pktLen) {
1098 * we don't have the entire packet within the first mblk (and
1099 * therefore we didn't do the msgpullup above), AND the first
1100 * mblk may not contain all the data we need to look at.
1102 ASSERT(pmp == NULL); /* couldn't have done msgpullup above */
1103 if ((pmp = msgpullup(mp, -1)) == NULL) {
1104 #ifdef GLD_DEBUG
1105 if (gld_debug & GLDERRS)
1106 cmn_err(CE_WARN,
1107 "GLD: interpret_fddi cannot msgpullup2");
1108 #endif
1109 goto out; /* can't interpret this pkt further */
1111 mp = pmp; /* this mblk should contain everything needed */
1115 * Check SAP/SNAP information.
1117 if ((mh->fddi_fc & 0x70) == 0x50) {
1118 if (pktinfo->pktLen < pktinfo->macLen + LLC_HDR1_LEN)
1119 goto out;
1121 pktinfo->isLLC = 1;
1123 if (pktinfo->pktLen < pktinfo->macLen + LLC_SNAP_HDR_LEN)
1124 goto out;
1126 snaphdr = (struct llc_snap_hdr *)(mp->b_rptr + pktinfo->macLen);
1127 if (ISETHERTYPE(snaphdr)) {
1128 pktinfo->ethertype = REF_NET_USHORT(snaphdr->type);
1129 pktinfo->hdrLen = LLC_SNAP_HDR_LEN;
1132 out:
1133 if (pmp != NULL)
1134 freemsg(pmp);
1136 return (0);
1139 mblk_t *
1140 gld_unitdata_fddi(gld_t *gld, mblk_t *mp)
1142 gld_mac_info_t *macinfo = gld->gld_mac_info;
1143 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
1144 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
1145 mac_addr_t dhost;
1146 unsigned short type;
1147 mblk_t *nmp;
1148 struct fddi_mac_frm *mh;
1149 int hdrlen;
1151 ASSERT(macinfo);
1153 /* extract needed info from the mblk before we maybe reuse it */
1154 mac_copy(gldp->glda_addr, dhost, macinfo->gldm_addrlen);
1156 /* look in the unitdata request for a sap, else use bound one */
1157 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
1158 REF_HOST_USHORT(gldp->glda_sap) != 0)
1159 type = REF_HOST_USHORT(gldp->glda_sap);
1160 else
1161 type = gld->gld_sap;
1164 hdrlen = sizeof (struct fddi_mac_frm);
1167 * Check whether we need to do EtherType encoding or whether the packet
1168 * is LLC.
1170 if (type > GLD_MAX_802_SAP)
1171 hdrlen += sizeof (struct llc_snap_hdr);
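	/*
	 * Note: SAP values above GLD_MAX_802_SAP (0xff) are EtherTypes
	 * and are encoded here in a SNAP header (llc_snap_def) whose
	 * type field carries the EtherType; smaller values are true
	 * 802.2 SAPs, for which the client supplies its own LLC header.
	 */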
1173 /* need a buffer big enough for the headers */
1174 nmp = mp->b_cont; /* where the packet payload M_DATA is */
1175 if (DB_REF(nmp) == 1 && MBLKHEAD(nmp) >= hdrlen) {
1176 /* it fits at the beginning of the first M_DATA block */
1177 freeb(mp); /* don't need the M_PROTO anymore */
1178 } else if (DB_REF(mp) == 1 && MBLKSIZE(mp) >= hdrlen) {
1179 /* we can reuse the dl_unitdata_req M_PROTO mblk */
1180 nmp = mp;
1181 DB_TYPE(nmp) = M_DATA;
1182 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1183 } else {
1184 /* we need to allocate one */
1185 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
1186 return (NULL);
1187 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1188 linkb(nmp, mp->b_cont);
1189 freeb(mp);
1193 /* Got the space, now copy in the header components */
1194 if (type > GLD_MAX_802_SAP) {
1195 /* create the snap header */
1196 struct llc_snap_hdr *snap;
1197 nmp->b_rptr -= sizeof (struct llc_snap_hdr);
1198 snap = (struct llc_snap_hdr *)(nmp->b_rptr);
1199 *snap = llc_snap_def;
1200 SET_NET_USHORT(snap->type, type);
1203 nmp->b_rptr -= sizeof (struct fddi_mac_frm);
1205 mh = (struct fddi_mac_frm *)nmp->b_rptr;
1207 mh->fddi_fc = 0x50;
1208 cmac_copy(dhost, mh->fddi_dhost, macinfo->gldm_addrlen, macinfo);
1211 * We access the mac address without the mutex to prevent
1212 * mutex contention (BUG 4211361)
1214 cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1215 mh->fddi_shost, macinfo->gldm_addrlen, macinfo);
1216 return (nmp);
1219 mblk_t *
1220 gld_fastpath_fddi(gld_t *gld, mblk_t *mp)
1222 gld_mac_info_t *macinfo = gld->gld_mac_info;
1223 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
1224 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
1225 unsigned short type;
1226 mblk_t *nmp;
1227 struct fddi_mac_frm *mh;
1228 int hdrlen;
1230 ASSERT(macinfo);
1232 /* look in the unitdata request for a sap, else use bound one */
1233 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
1234 REF_HOST_USHORT(gldp->glda_sap) != 0)
1235 type = REF_HOST_USHORT(gldp->glda_sap);
1236 else
1237 type = gld->gld_sap;
1239 hdrlen = sizeof (struct fddi_mac_frm);
1242 * Check whether we need to do EtherType encoding or whether the packet
1243 * will be LLC.
1245 if (type > GLD_MAX_802_SAP)
1246 hdrlen += sizeof (struct llc_snap_hdr);
1248 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
1249 return (NULL);
1251 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1253 /* Got the space, now copy in the header components */
1255 if (type > GLD_MAX_802_SAP) {
1256 /* create the snap header */
1257 struct llc_snap_hdr *snap;
1258 nmp->b_rptr -= sizeof (struct llc_snap_hdr);
1259 snap = (struct llc_snap_hdr *)(nmp->b_rptr);
1260 *snap = llc_snap_def;
1261 snap->type = htons(type); /* we know it's aligned */
1264 nmp->b_rptr -= sizeof (struct fddi_mac_frm);
1266 mh = (struct fddi_mac_frm *)nmp->b_rptr;
1267 mh->fddi_fc = 0x50;
1268 cmac_copy(gldp->glda_addr, mh->fddi_dhost,
1269 macinfo->gldm_addrlen, macinfo);
1271 GLDM_LOCK(macinfo, RW_WRITER);
1272 cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1273 mh->fddi_shost, macinfo->gldm_addrlen, macinfo);
1274 GLDM_UNLOCK(macinfo);
1276 return (nmp);
1279 /* ========== */
1280 /* Token Ring */
1281 /* ========== */
1283 #define GLD_SR_VAR(macinfo) \
1284 (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->data)
1286 #define GLD_SR_HASH(macinfo) ((struct srtab **)GLD_SR_VAR(macinfo))
1288 #define GLD_SR_MUTEX(macinfo) \
1289 (&((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->datalock)
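/*
 * These macros reuse the per-MAC private 'data' and 'datalock' fields to
 * hold the source-routing state: an SR_HASH_SIZE array of struct srtab
 * buckets allocated in gld_init_tr() and freed in gld_uninit_tr(),
 * protected by GLD_SR_MUTEX().
 */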
1291 static void gld_sr_clear(gld_mac_info_t *);
1292 static void gld_rcc_receive(gld_mac_info_t *, pktinfo_t *, struct gld_ri *,
1293 uchar_t *, int);
1294 static void gld_rcc_send(gld_mac_info_t *, queue_t *, uchar_t *,
1295 struct gld_ri **, uchar_t *);
1297 static mac_addr_t tokenbroadcastaddr2 = { 0xc0, 0x00, 0xff, 0xff, 0xff, 0xff };
1298 static struct gld_ri ri_ste_def;
1300 void
1301 gld_init_tr(gld_mac_info_t *macinfo)
1303 struct gldkstats *sp =
1304 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->kstatp->ks_data;
1306	 /* avoid endian-dependent code by initializing here instead of statically */
1307 ri_ste_def.len = 2;
1308 ri_ste_def.rt = RT_STE;
1309 ri_ste_def.mtu = RT_MTU_MAX;
1310 ri_ste_def.dir = 0;
1311 ri_ste_def.res = 0;
1313 /* Assumptions we make for this medium */
1314 ASSERT(macinfo->gldm_type == DL_TPR);
1315 ASSERT(macinfo->gldm_addrlen == 6);
1316 ASSERT(macinfo->gldm_saplen == -2);
1317 #ifndef lint
1318 ASSERT(sizeof (struct tr_mac_frm_nori) == 14);
1319 ASSERT(sizeof (mac_addr_t) == 6);
1320 #endif
1322 mutex_init(GLD_SR_MUTEX(macinfo), NULL, MUTEX_DRIVER, NULL);
1324 GLD_SR_VAR(macinfo) = kmem_zalloc(sizeof (struct srtab *)*SR_HASH_SIZE,
1325 KM_SLEEP);
1327 /* Default is RDE enabled for this medium */
1328 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_enabled =
1329 ddi_getprop(DDI_DEV_T_NONE, macinfo->gldm_devinfo, 0,
1330 "gld_rde_enable", 1);
1333 * Default is to use STE for unknown paths if RDE is enabled.
1334 * If RDE is disabled, default is to use NULL RIF fields.
1336 * It's possible to force use of STE for ALL packets:
1337 * disable RDE but enable STE. This may be useful for
1338 * non-transparent bridges, when it is not desired to run
1339 * the RDE algorithms.
1341 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_str_indicator_ste =
1342 ddi_getprop(DDI_DEV_T_NONE, macinfo->gldm_devinfo, 0,
1343 "gld_rde_str_indicator_ste",
1344 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_enabled);
1346 /* Default 10 second route timeout on lack of activity */
1348 int t = ddi_getprop(DDI_DEV_T_NONE, macinfo->gldm_devinfo, 0,
1349 "gld_rde_timeout", 10);
1350 if (t < 1)
1351 t = 1; /* Let's be reasonable */
1352 if (t > 600)
1353 t = 600; /* Let's be reasonable */
1354 /* We're using ticks (lbolts) for our timeout -- convert from seconds */
1355 t = drv_usectohz(1000000 * t);
1356 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_timeout = t;
1359 kstat_named_init(&sp->glds_dot5_line_error,
1360 "line_errors", KSTAT_DATA_UINT32);
1361 kstat_named_init(&sp->glds_dot5_burst_error,
1362 "burst_errors", KSTAT_DATA_UINT32);
1363 kstat_named_init(&sp->glds_dot5_signal_loss,
1364 "signal_losses", KSTAT_DATA_UINT32);
1367 * only initialize the new statistics if the driver
1368 * knows about them.
1370 if (macinfo->gldm_driver_version != GLD_VERSION_200)
1371 return;
1373 kstat_named_init(&sp->glds_dot5_ace_error,
1374 "ace_errors", KSTAT_DATA_UINT32);
1375 kstat_named_init(&sp->glds_dot5_internal_error,
1376 "internal_errors", KSTAT_DATA_UINT32);
1377 kstat_named_init(&sp->glds_dot5_lost_frame_error,
1378 "lost_frame_errors", KSTAT_DATA_UINT32);
1379 kstat_named_init(&sp->glds_dot5_frame_copied_error,
1380 "frame_copied_errors", KSTAT_DATA_UINT32);
1381 kstat_named_init(&sp->glds_dot5_token_error,
1382 "token_errors", KSTAT_DATA_UINT32);
1383 kstat_named_init(&sp->glds_dot5_freq_error,
1384 "freq_errors", KSTAT_DATA_UINT32);
1387 void
1388 gld_uninit_tr(gld_mac_info_t *macinfo)
1390 mutex_destroy(GLD_SR_MUTEX(macinfo));
1391 gld_sr_clear(macinfo);
1392 kmem_free(GLD_SR_VAR(macinfo), sizeof (struct srtab *) * SR_HASH_SIZE);
1396 gld_interpret_tr(gld_mac_info_t *macinfo, mblk_t *mp, pktinfo_t *pktinfo,
1397 packet_flag_t flags)
1399 struct tr_mac_frm *mh;
1400 gld_mac_pvt_t *mac_pvt;
1401 struct llc_snap_hdr *snaphdr;
1402 mblk_t *pmp = NULL;
1403 struct gld_ri *rh;
1406 * Quickly handle receive fastpath; TR does not support IPQ hack.
1408 if (flags == GLD_RXQUICK) {
1409 pktinfo->pktLen = msgdsize(mp);
1410 return (-1);
1413 bzero((void *)pktinfo, sizeof (*pktinfo));
1415 pktinfo->pktLen = msgdsize(mp);
1417 /* make sure packet has at least a whole mac header */
1418 if (pktinfo->pktLen < sizeof (struct tr_mac_frm_nori))
1419 return (-1);
1421 /* make sure the mac header falls into contiguous memory */
1422 if (MBLKL(mp) < sizeof (struct tr_mac_frm_nori)) {
1423 if ((pmp = msgpullup(mp, -1)) == NULL) {
1424 #ifdef GLD_DEBUG
1425 if (gld_debug & GLDERRS)
1426 cmn_err(CE_WARN,
1427 "GLD: interpret_tr cannot msgpullup");
1428 #endif
1429 return (-1);
1431 mp = pmp; /* this mblk contains the whole mac header */
1434 mh = (struct tr_mac_frm *)mp->b_rptr;
1436 /* Check to see if the mac is a broadcast or multicast address. */
1437 if (mac_eq(mh->tr_dhost, ether_broadcast, macinfo->gldm_addrlen) ||
1438 mac_eq(mh->tr_dhost, tokenbroadcastaddr2, macinfo->gldm_addrlen))
1439 pktinfo->isBroadcast = 1;
1440 else if (mh->tr_dhost[0] & 0x80)
1441 pktinfo->isMulticast = 1;
1443 if (flags == GLD_TX)
1444 goto out; /* Got all info we need for xmit case */
1446 ASSERT(GLDM_LOCK_HELD(macinfo));
1449 * Deal with the mac header
1452 mac_copy(mh->tr_dhost, pktinfo->dhost, macinfo->gldm_addrlen);
1453 mac_copy(mh->tr_shost, pktinfo->shost, macinfo->gldm_addrlen);
1454 pktinfo->shost[0] &= ~0x80; /* turn off RIF indicator */
1456 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1457 pktinfo->isLooped = mac_eq(pktinfo->shost,
1458 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
1459 pktinfo->isForMe = mac_eq(pktinfo->dhost,
1460 mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
1462 rh = (struct gld_ri *)NULL;
1463 pktinfo->macLen = sizeof (struct tr_mac_frm_nori);
1466 * Before trying to look beyond the MAC header, make sure the data
1467 * structures are all contiguously where we can conveniently look at
1468 * them. We'll use a worst-case estimate of how many bytes into the
1469 * packet data we'll be needing to look. Things will be more efficient
1470 * if the driver puts at least this much into the first mblk.
1472 * Even after this, we still will have to do checks against the total
1473 * length of the packet. A bad incoming packet may not hold all the
1474 * data structures it says it does.
1476 if (MBLKL(mp) < sizeof (struct tr_mac_frm) +
1477 LLC_HDR1_LEN + sizeof (struct rde_pdu) &&
1478 MBLKL(mp) < pktinfo->pktLen) {
1480 * we don't have the entire packet within the first mblk (and
1481 * therefore we didn't do the msgpullup above), AND the first
1482 * mblk may not contain all the data we need to look at.
1484 ASSERT(pmp == NULL); /* couldn't have done msgpullup above */
1485 if ((pmp = msgpullup(mp, -1)) == NULL) {
1486 #ifdef GLD_DEBUG
1487 if (gld_debug & GLDERRS)
1488 cmn_err(CE_WARN,
1489 "GLD: interpret_tr cannot msgpullup2");
1490 #endif
1491 goto out; /* can't interpret this pkt further */
1493 mp = pmp; /* this mblk should contain everything needed */
1494 mh = (struct tr_mac_frm *)mp->b_rptr; /* to look at RIF */
1497 if (mh->tr_shost[0] & 0x80) {
1498 /* Routing Information Field (RIF) is present */
1499 if (pktinfo->pktLen < sizeof (struct tr_mac_frm_nori) + 2)
1500 goto out; /* RIF should have been there! */
1501 rh = (struct gld_ri *)&mh->tr_ri;
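		/*
		 * The RIF length field counts the two-byte routing
		 * control field plus any two-byte route descriptors,
		 * so a valid length is even and at least 2.
		 */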
1502 if ((rh->len & 1) || rh->len < 2) {
1503 /* Bogus RIF, don't handle this packet */
1504 #ifdef GLD_DEBUG
1505 if (gld_debug & GLDERRS)
1506 cmn_err(CE_WARN,
1507 "GLD: received TR packet with "
1508 "bogus RIF length %d",
1509 rh->len);
1510 #endif
1511 goto out;
1513 if (pktinfo->pktLen < sizeof (struct tr_mac_frm_nori) + rh->len)
1514 goto out; /* RIF should have been there! */
1515 pktinfo->macLen += rh->len;
1518 if ((mh->tr_fc & 0xc0) == 0x40) {
1519 if (pktinfo->pktLen < pktinfo->macLen + LLC_HDR1_LEN)
1520 goto out;
1522 pktinfo->isLLC = 1;
1524 if (pktinfo->pktLen < pktinfo->macLen + LLC_SNAP_HDR_LEN)
1525 goto out;
1527 snaphdr = (struct llc_snap_hdr *)(mp->b_rptr + pktinfo->macLen);
1528 if (ISETHERTYPE(snaphdr)) {
1529 pktinfo->ethertype = REF_NET_USHORT(snaphdr->type);
1530 pktinfo->hdrLen = LLC_SNAP_HDR_LEN;
1533 /* Inform the Route Control Component of received LLC frame */
1534 gld_rcc_receive(macinfo, pktinfo, rh,
1535 mp->b_rptr + pktinfo->macLen,
1536 pktinfo->pktLen - pktinfo->macLen);
1538 out:
1539 if (pmp != NULL)
1540 freemsg(pmp);
1542 return (0);
1545 mblk_t *
1546 gld_unitdata_tr(gld_t *gld, mblk_t *mp)
1548 gld_mac_info_t *macinfo = gld->gld_mac_info;
1549 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
1550 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
1551 mac_addr_t dhost;
1552 unsigned short type;
1553 mblk_t *nmp, *llcmp, *pmp = NULL;
1554 struct tr_mac_frm_nori *mh;
1555 int hdrlen;
1556 struct gld_ri *rh;
1558 ASSERT(macinfo);
1560 /* extract needed info from the mblk before we maybe reuse it */
1561 mac_copy(gldp->glda_addr, dhost, macinfo->gldm_addrlen);
1563 /* look in the unitdata request for a sap, else use bound one */
1564 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
1565 REF_HOST_USHORT(gldp->glda_sap) != 0)
1566 type = REF_HOST_USHORT(gldp->glda_sap);
1567 else
1568 type = gld->gld_sap;
1570 /* includes maximum possible Routing Information Field (RIF) size */
1571 hdrlen = sizeof (struct tr_mac_frm);
1574 * Check whether we need to do EtherType encoding or whether the packet
1575 * is LLC.
1577 if (type > GLD_MAX_802_SAP)
1578 hdrlen += sizeof (struct llc_snap_hdr);
1580 /* need a buffer big enough for the headers */
1581 llcmp = nmp = mp->b_cont; /* where the packet payload M_DATA is */
1584	 * We are going to need to look at the LLC header, so make sure it
1585	 * is contiguous within a single mblk. If we're the ones who create
1586 * the LLC header (below, in the case where sap > 0xff) then we don't
1587 * have to worry about it here.
1589 ASSERT(nmp != NULL); /* gld_unitdata guarantees msgdsize > 0 */
1590 if (type <= GLD_MAX_802_SAP) {
1591 if (MBLKL(llcmp) < LLC_HDR1_LEN) {
1592 llcmp = pmp = msgpullup(nmp, LLC_HDR1_LEN);
1593 if (pmp == NULL) {
1594 #ifdef GLD_DEBUG
1595 if (gld_debug & GLDERRS)
1596 cmn_err(CE_WARN,
1597 "GLD: unitdata_tr "
1598 "cannot msgpullup");
1599 #endif
1600 return (NULL);
1605 if (DB_REF(nmp) == 1 && MBLKHEAD(nmp) >= hdrlen) {
1606 /* it fits at the beginning of the first M_DATA block */
1607 freeb(mp); /* don't need the M_PROTO anymore */
1608 } else if (DB_REF(mp) == 1 && MBLKSIZE(mp) >= hdrlen) {
1609 /* we can reuse the dl_unitdata_req M_PROTO mblk */
1610 nmp = mp;
1611 DB_TYPE(nmp) = M_DATA;
1612 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1613 } else {
1614 /* we need to allocate one */
1615 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL) {
1616 if (pmp != NULL)
1617 freemsg(pmp);
1618 return (NULL);
1620 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1621 linkb(nmp, mp->b_cont);
1622 freeb(mp);
1625 /* Got the space, now copy in the header components */
1626 if (type > GLD_MAX_802_SAP) {
1627 /* create the snap header */
1628 struct llc_snap_hdr *snap;
1629 llcmp = nmp; /* LLC header is going to be in this mblk */
1630 nmp->b_rptr -= sizeof (struct llc_snap_hdr);
1631 snap = (struct llc_snap_hdr *)(nmp->b_rptr);
1632 *snap = llc_snap_def;
1633 SET_NET_USHORT(snap->type, type);
1636 /* Hold SR tables still while we maybe point at an entry */
1637 mutex_enter(GLD_SR_MUTEX(macinfo));
1639 gld_rcc_send(macinfo, WR(gld->gld_qptr), dhost, &rh, llcmp->b_rptr);
1641 if (rh != NULL) {
1642 /* copy in the RIF */
1643 ASSERT(rh->len <= sizeof (struct gld_ri));
1644 nmp->b_rptr -= rh->len;
1645 bcopy((caddr_t)rh, (caddr_t)nmp->b_rptr, rh->len);
1648 mutex_exit(GLD_SR_MUTEX(macinfo));
1650 /* no longer need the pulled-up mblk */
1651 if (pmp != NULL)
1652 freemsg(pmp);
1655 * fill in token ring header
1657 nmp->b_rptr -= sizeof (struct tr_mac_frm_nori);
1658 mh = (struct tr_mac_frm_nori *)nmp->b_rptr;
1659 mh->tr_ac = 0x10;
1660 mh->tr_fc = 0x40;
1661 mac_copy(dhost, mh->tr_dhost, macinfo->gldm_addrlen);
1664 * We access the mac address without the mutex to prevent
1665 * mutex contention (BUG 4211361)
1667 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1668 mh->tr_shost, macinfo->gldm_addrlen);
1670 if (rh != NULL)
1671 mh->tr_shost[0] |= 0x80;
1672 else
1673 mh->tr_shost[0] &= ~0x80;
1675 return (nmp);
1679 * We cannot have our client sending us "fastpath" M_DATA messages,
1680 * because to do that we must provide a fixed MAC header to
1681 * be prepended to each outgoing packet. But with Source Routing
1682 * media, the length and content of the MAC header changes as the
1683 * routes change, so there is no fixed header we can provide. So
1684 * we decline to accept M_DATA messages if Source Routing is enabled.
1686 mblk_t *
1687 gld_fastpath_tr(gld_t *gld, mblk_t *mp)
1689 gld_mac_info_t *macinfo = gld->gld_mac_info;
1690 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
1691 struct gld_dlsap *gldp = DLSAP(dlp, dlp->dl_dest_addr_offset);
1692 unsigned short type;
1693 mblk_t *nmp;
1694 struct tr_mac_frm_nori *mh;
1695 int hdrlen;
1697 ASSERT(macinfo);
1700 * If we are doing Source Routing, then we cannot provide a fixed
1701 * MAC header, so fail.
1703 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_enabled)
1704 return (NULL);
1706 /* look in the unitdata request for a sap, else use bound one */
1707 if (dlp->dl_dest_addr_length >= DLSAPLENGTH(macinfo) &&
1708 REF_HOST_USHORT(gldp->glda_sap) != 0)
1709 type = REF_HOST_USHORT(gldp->glda_sap);
1710 else
1711 type = gld->gld_sap;
1713 hdrlen = sizeof (struct tr_mac_frm_nori);
1715 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_str_indicator_ste)
1716 hdrlen += ri_ste_def.len;
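	/*
	 * When rde_str_indicator_ste is set, a fixed spanning-tree
	 * explorer (RT_STE) RIF -- ri_ste_def, built in gld_init_tr()
	 * -- is included in the fast-path header so that no per-packet
	 * route computation is needed.
	 */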
1719 * Check whether we need to do EtherType encoding or whether the packet
1720 * will be LLC.
1722 if (type > GLD_MAX_802_SAP)
1723 hdrlen += sizeof (struct llc_snap_hdr);
1725 if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL)
1726 return (NULL);
1728 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1730 /* Got the space, now copy in the header components */
1732 if (type > GLD_MAX_802_SAP) {
1733 /* create the snap header */
1734 struct llc_snap_hdr *snap;
1735 nmp->b_rptr -= sizeof (struct llc_snap_hdr);
1736 snap = (struct llc_snap_hdr *)(nmp->b_rptr);
1737 *snap = llc_snap_def;
1738 snap->type = htons(type); /* we know it's aligned */
1741 /* RDE is disabled, use NULL RIF, or STE RIF */
1742 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_str_indicator_ste) {
1743 nmp->b_rptr -= ri_ste_def.len;
1744 bcopy((caddr_t)&ri_ste_def, (caddr_t)nmp->b_rptr,
1745 ri_ste_def.len);
1749 * fill in token ring header
1751 nmp->b_rptr -= sizeof (struct tr_mac_frm_nori);
1752 mh = (struct tr_mac_frm_nori *)nmp->b_rptr;
1753 mh->tr_ac = 0x10;
1754 mh->tr_fc = 0x40;
1755 mac_copy(gldp->glda_addr, mh->tr_dhost, macinfo->gldm_addrlen);
1757 GLDM_LOCK(macinfo, RW_WRITER);
1758 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1759 mh->tr_shost, macinfo->gldm_addrlen);
1760 GLDM_UNLOCK(macinfo);
1762 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_str_indicator_ste)
1763 mh->tr_shost[0] |= 0x80;
1764 else
1765 mh->tr_shost[0] &= ~0x80;
1767 return (nmp);
1771 * Route Determination Entity (ISO 8802-2 / IEEE 802.2 : 1994, Section 9)
1773 * RDE is an LLC layer entity. GLD is a MAC layer entity. The proper
1774 * solution to this architectural anomaly is to move RDE support out of GLD
1775 * and into LLC where it belongs. In particular, only LLC has the knowledge
1776 * necessary to reply to XID and TEST packets. If and when it comes time to
1777 * move RDE out of GLD to LLC, the LLC-to-GLD interface should be modified
1778 * to use MA_UNITDATA structures rather than DL_UNITDATA structures. Of
1779 * course, GLD will still have to continue to also support the DL_ structures
1780 * as long as IP is not layered over LLC. Another, perhaps better, idea
1781 * would be to make RDE an autopush module on top of the token ring drivers:
1782 * RDE would sit between LLC and GLD. It would then also sit between IP and
1783 * GLD, providing services to all clients of GLD/tokenring. In that case,
1784 * GLD would still have to continue to support the DL_ interface for non-
1785	 * Token Ring interfaces, using the MA_ interface only for media
1786	 * that support Source Routing.
1788 * At present, Token Ring is the only source routing medium we support.
1789 * Since Token Ring is not at this time a strategic network medium for Sun,
1790 * rather than devote a large amount of resources to creating a proper
1791 * architecture and implementation of RDE, we do the minimum necessary to
1792 * get it to work. The interface between the above token ring code and the
1793 * below RDE code is designed to make it relatively easy to change to an
1794 * MA_UNITDATA model later should this ever become a priority.
1797 static void gld_send_rqr(gld_mac_info_t *, uchar_t *, struct gld_ri *,
1798 struct rde_pdu *, int);
1799 static void gld_rde_pdu_req(gld_mac_info_t *, queue_t *, uchar_t *,
1800 struct gld_ri *, uchar_t, uchar_t, uchar_t);
1801 static void gld_get_route(gld_mac_info_t *, queue_t *, uchar_t *,
1802 struct gld_ri **, uchar_t, uchar_t);
1803 static void gld_reset_route(gld_mac_info_t *, queue_t *,
1804 uchar_t *, uchar_t, uchar_t);
1805 static void gld_rde_pdu_ind(gld_mac_info_t *, struct gld_ri *, struct rde_pdu *,
1806 int);
1807 static void gld_rif_ind(gld_mac_info_t *, struct gld_ri *, uchar_t *,
1808 uchar_t, uchar_t);
1809 static struct srtab **gld_sr_hash(struct srtab **, uchar_t *, int);
1810 static struct srtab *gld_sr_lookup_entry(gld_mac_info_t *, uchar_t *);
1811 static struct srtab *gld_sr_create_entry(gld_mac_info_t *, uchar_t *);
1814 * This routine implements a modified subset of the 802.2 RDE RCC receive
1815 * actions:
1816 * we implement RCC receive events 3 to 12 (ISO 8802-2:1994 9.6.3.4);
1817 * we omit special handling for the NULL SAP;
1818 * we omit XID/TEST handling;
1819 * we pass all packets (including RDE) upstream to LLC.
1821 static void
1822 gld_rcc_receive(gld_mac_info_t *macinfo, pktinfo_t *pktinfo, struct gld_ri *rh,
1823 uchar_t *llcpkt, int llcpktlen)
1825 struct llc_snap_hdr *snaphdr = (struct llc_snap_hdr *)(llcpkt);
1827 if (!((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_enabled)
1828 return;
1831 * First, ensure this packet wasn't something we received just
1832 * because we were in promiscuous mode. Since none of the below
1833 * code wants to see group addressed packets anyway, we can do
1834 * this check up front. Since we're doing that, we can omit the
1835 * checks for group addressed packets below.
1837 if (!pktinfo->isForMe)
1838 return; /* Event 6 */
1840 /* Process a subset of Route Determination Entity (RDE) packets */
1841 if (snaphdr->d_lsap == LSAP_RDE) {
1842 struct rde_pdu *pdu = (struct rde_pdu *)(llcpkt + LLC_HDR1_LEN);
1843 int pdulen = llcpktlen - LLC_HDR1_LEN;
1845 /* sanity check the PDU */
1846 if ((pdulen < sizeof (struct rde_pdu)) ||
1847 (snaphdr->s_lsap != LSAP_RDE))
1848 return;
1850 /* we only handle route discovery PDUs, not XID/TEST/other */
1851 if (snaphdr->control != CNTL_LLC_UI)
1852 return;
1854 switch (pdu->rde_ptype) {
1855 case RDE_RQC: /* Route Query Command; Events 8 - 11 */
1856 gld_send_rqr(macinfo, pktinfo->shost, rh, pdu, pdulen);
1857 /* FALLTHROUGH */
1858 case RDE_RQR: /* Route Query Response; Event 12 */
1859 case RDE_RS: /* Route Selected; Event 7 */
1860 gld_rde_pdu_ind(macinfo, rh, pdu, pdulen);
1861 break;
1862 default: /* ignore if unrecognized ptype */
1863 return;
1866 return;
1869 /* Consider routes seen in other IA SRF packets */
1871 if (rh == NULL)
1872 return; /* no RIF; Event 3 */
1874 if ((rh->rt & 0x04) != 0)
1875 return; /* not SRF; Event 5 */
1877 gld_rif_ind(macinfo, rh, pktinfo->shost, snaphdr->s_lsap,
1878 snaphdr->d_lsap); /* Event 4 */
1879 }
1881 /*
1882 * Send RQR: 802.2 9.6.3.4.2(9) RCC Receive Events 8-11
1884 * The routing processing really doesn't belong here; it should be handled in
1885 * the LLC layer above. If that were the case then RDE could just send down
1886 * an extra MA_UNITDATA_REQ with the info needed to construct the packet. But
1887 * at the time we get control here, it's not a particularly good time to be
1888 * constructing packets and trying to send them. Specifically, at this layer
1889 * we need to construct the full media packet, which means the below routine
1890 * knows that it is dealing with Token Ring media. If this were instead done
1891 * via a proper MA_UNITDATA interface, the RDE stuff could all be completely
1892 * media independent. But since TR is the only source routing medium we
1893 * support, this works even though it is not clean.
1895 * We "know" that the only time we can get here is from the "interpret"
1896 * routine, and only when it was called at receive time.
1897 */
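/*
 * The routine below builds the reply frame back to front in a single
 * mblk, decrementing b_rptr as each layer is prepended, so the final
 * layout (lowest offset first) is:
 *
 *	struct tr_mac_frm_nori	AC/FC/dhost/shost, RII bit set in shost[0]
 *	struct gld_ri		RIF: either a 2-byte ARE header or the
 *				reversed incoming SRF route
 *	LLC_HDR1_LEN bytes	LLC UI header, dsap = ssap = LSAP_RDE
 *	struct rde_pdu		the RQR itself
 */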
1898 static void
1899 gld_send_rqr(gld_mac_info_t *macinfo, uchar_t *shost, struct gld_ri *rh,
1900 struct rde_pdu *pdu, int pdulen)
1902 mblk_t *nmp;
1903 int nlen;
1904 struct tr_mac_frm_nori *nmh;
1905 struct gld_ri *nrh;
1906 struct llc_snap_hdr *nsnaphdr;
1907 struct rde_pdu *npdu;
1909 /* We know and assume we're on the receive path */
1910 ASSERT(GLDM_LOCK_HELD(macinfo));
1912 if (pdulen < sizeof (struct rde_pdu))
1913 return; /* Bad incoming PDU */
1915 nlen = sizeof (struct tr_mac_frm) + LLC_HDR1_LEN +
1916 sizeof (struct rde_pdu);
1918 if ((nmp = allocb(nlen, BPRI_MED)) == NULL)
1919 return;
1921 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
1923 nmp->b_rptr -= sizeof (struct rde_pdu);
1924 npdu = (struct rde_pdu *)(nmp->b_rptr);
1925 *npdu = *pdu; /* copy orig/target macaddr/saps */
1926 npdu->rde_ver = 1;
1927 npdu->rde_ptype = RDE_RQR;
1928 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1929 npdu->rde_target_mac, macinfo->gldm_addrlen);
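/*
 * The PDU is seeded from the incoming RQC, so rde_orig_mac/rde_orig_sap
 * still identify the querier; only the version, ptype and target mac are
 * overwritten, so the RQR names this station as the responding target.
 */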
1931 nmp->b_rptr -= LLC_HDR1_LEN;
1932 nsnaphdr = (struct llc_snap_hdr *)(nmp->b_rptr);
1933 nsnaphdr->s_lsap = nsnaphdr->d_lsap = LSAP_RDE;
1934 nsnaphdr->control = CNTL_LLC_UI;
1936 if (rh == NULL || (rh->rt & 0x06) == 0x06 ||
1937 rh->len > sizeof (struct gld_ri)) {
1938 /* no RIF (Event 8), or RIF type STE (Event 9): send ARE RQR */
1939 nmp->b_rptr -= 2;
1940 nrh = (struct gld_ri *)(nmp->b_rptr);
1941 nrh->len = 2;
1942 nrh->rt = RT_ARE;
1943 nrh->dir = 0;
1944 nrh->res = 0;
1945 nrh->mtu = RT_MTU_MAX;
1946 } else {
1947 /*
1948 * RIF must be ARE (Event 10) or SRF (Event 11):
1949 * send SRF (reverse) RQR
1950 */
1951 ASSERT(rh->len <= sizeof (struct gld_ri));
1952 nmp->b_rptr -= rh->len;
1953 nrh = (struct gld_ri *)(nmp->b_rptr);
1954 bcopy(rh, nrh, rh->len); /* copy incoming RIF */
1955 nrh->rt = RT_SRF; /* make it SRF */
1956 nrh->dir ^= 1; /* reverse direction */
1959 nmp->b_rptr -= sizeof (struct tr_mac_frm_nori);
1960 nmh = (struct tr_mac_frm_nori *)(nmp->b_rptr);
1961 nmh->tr_ac = 0x10;
1962 nmh->tr_fc = 0x40;
1963 mac_copy(shost, nmh->tr_dhost, macinfo->gldm_addrlen);
1964 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
1965 nmh->tr_shost, macinfo->gldm_addrlen);
1966 nmh->tr_shost[0] |= 0x80; /* indicate RIF present */
1968 /*
1969 * Packet assembled; send it.
1971 * As noted before, this is not really a good time to be trying to
1972 * send out packets. We have no obvious queue to use if the packet
1973 * can't be sent right away. We pick one arbitrarily.
1974 */
1975 {
1976 gld_vlan_t *vlan;
1977 queue_t *q;
1979 if ((vlan = gld_find_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
1980 /* oops, no vlan on the list for this macinfo! */
1981 /* this should not happen */
1982 freeb(nmp);
1983 return;
1985 q = vlan->gldv_str_next->gld_qptr;
1987 /*
1988 * Queue the packet and let gld_wsrv
1989 * handle it, thus preventing a panic
1990 * caused by v2 TR in promiscuous mode
1991 * where it attempts to get the mutex
1992 * in this thread while already holding
1993 * it.
1994 */
1995 (void) putbq(WR(q), nmp);
1996 qenable(WR(q));
1997 }
1998 }
2000 /*
2001 * This routine implements a modified subset of the 802.2 RDE RCC send actions:
2002 * we implement RCC send events 5 to 10 (ISO 8802-2:1994 9.6.3.5);
2003 * we omit special handling for the NULL SAP;
2004 * events 11 to 12 are handled by gld_rde_pdu_req below;
2005 * we require an immediate response to our GET_ROUTE_REQUEST.
2006 */
2007 static void
2008 gld_rcc_send(gld_mac_info_t *macinfo, queue_t *q, uchar_t *dhost,
2009 struct gld_ri **rhp, uchar_t *llcpkt)
2011 struct llc_snap_hdr *snaphdr = (struct llc_snap_hdr *)(llcpkt);
2013 /*
2014 * Our caller must hold the mutex: to avoid an extra bcopy of the RIF
2015 * on every transmit, we pass back a pointer to our sr table entry via
2016 * rhp, and the caller has to keep the mutex until it has had a chance
2017 * to copy the RIF out into the outgoing packet, so that we don't
2018 * modify the entry while it's being copied. This is a little ugly,
2019 * but saves the extra bcopy.
2020 */
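/*
 * A typical caller therefore looks roughly like this (sketch only, not
 * a verbatim caller; outbound_rif stands for the RIF area of the header
 * being assembled):
 *
 *	mutex_enter(GLD_SR_MUTEX(macinfo));
 *	gld_rcc_send(macinfo, q, dhost, &rh, llcpkt);
 *	if (rh != NULL)
 *		bcopy((caddr_t)rh, (caddr_t)outbound_rif, rh->len);
 *	mutex_exit(GLD_SR_MUTEX(macinfo));
 */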
2021 ASSERT(mutex_owned(GLD_SR_MUTEX(macinfo)));
2023 *rhp = (struct gld_ri *)NULL; /* start off clean (no RIF) */
2025 if (!((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_enabled) {
2026 /* RDE is disabled -- use NULL or STE always */
2027 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->
2028 rde_str_indicator_ste)
2029 *rhp = &ri_ste_def; /* STE option */
2030 return;
2033 if (!(dhost[0] & 0x80)) {
2034 /* individual address; Events 7 - 10 */
2035 if ((snaphdr->control & 0xef) == 0xe3) {
2036 /* TEST command, reset the route */
2037 gld_reset_route(macinfo, q,
2038 dhost, snaphdr->d_lsap, snaphdr->s_lsap);
2040 gld_get_route(macinfo, q,
2041 dhost, rhp, snaphdr->d_lsap, snaphdr->s_lsap);
2044 if (*rhp == NULL) {
2045 /*
2046 * group address (Events 5 - 6),
2047 * or no route available (Events 8 - 9):
2048 * Need to send NSR or STE, as configured.
2049 */
2050 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->
2051 rde_str_indicator_ste)
2052 *rhp = &ri_ste_def; /* STE option */
2053 }
2054 }
2056 /*
2057 * RCC send events 11 - 12
2059 * At present we only handle the RQC ptype.
2061 * We "know" that the only time we can get here is from the "unitdata"
2062 * routine, called at wsrv time.
2064 * If we ever implement the RS ptype (Event 13), this may no longer be true!
2065 */
2066 static void
2067 gld_rde_pdu_req(gld_mac_info_t *macinfo, queue_t *q, uchar_t *dhost,
2068 struct gld_ri *rh, uchar_t dsap, uchar_t ssap, uchar_t ptype)
2070 mblk_t *nmp;
2071 int nlen;
2072 struct tr_mac_frm_nori *nmh;
2073 struct gld_ri *nrh;
2074 struct llc_snap_hdr *nsnaphdr;
2075 struct rde_pdu *npdu;
2076 int srpresent = 0;
2078 /* if you change this to process other types, review all code below */
2079 ASSERT(ptype == RDE_RQC);
2080 ASSERT(rh == NULL); /* RQC never uses SRF */
2082 nlen = sizeof (struct tr_mac_frm) + LLC_HDR1_LEN +
2083 sizeof (struct rde_pdu);
2085 if ((nmp = allocb(nlen, BPRI_MED)) == NULL)
2086 return;
2088 nmp->b_rptr = nmp->b_wptr = DB_LIM(nmp);
2090 nmp->b_rptr -= sizeof (struct rde_pdu);
2091 npdu = (struct rde_pdu *)(nmp->b_rptr);
2092 npdu->rde_ver = 1;
2093 npdu->rde_ptype = ptype;
2094 mac_copy(dhost, &npdu->rde_target_mac, 6);
2096 /*
2097 * access the mac address without a mutex - take a risk -
2098 * to prevent mutex contention (BUG 4211361)
2099 */
2100 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
2101 &npdu->rde_orig_mac, 6);
2102 npdu->rde_target_sap = dsap;
2103 npdu->rde_orig_sap = ssap;
2105 nmp->b_rptr -= LLC_HDR1_LEN;
2106 nsnaphdr = (struct llc_snap_hdr *)(nmp->b_rptr);
2107 nsnaphdr->s_lsap = nsnaphdr->d_lsap = LSAP_RDE;
2108 nsnaphdr->control = CNTL_LLC_UI;
2110 #if 0 /* we don't need this for now */
2111 if (rh != NULL) {
2112 /* send an SRF frame with specified RIF */
2113 ASSERT(rh->len <= sizeof (struct gld_ri));
2114 nmp->b_rptr -= rh->len;
2115 nrh = (struct gld_ri *)(nmp->b_rptr);
2116 bcopy(rh, nrh, rh->len);
2117 ASSERT(nrh->rt == RT_SRF);
2118 srpresent = 1;
2119 } else
2120 #endif
2122 /* Need to send NSR or STE, as configured. */
2123 if (((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_str_indicator_ste) {
2124 /* send an STE frame */
2125 nmp->b_rptr -= 2;
2126 nrh = (struct gld_ri *)(nmp->b_rptr);
2127 nrh->len = 2;
2128 nrh->rt = RT_STE;
2129 nrh->dir = 0;
2130 nrh->res = 0;
2131 nrh->mtu = RT_MTU_MAX;
2132 srpresent = 1;
2133 } /* else send an NSR frame */
2135 nmp->b_rptr -= sizeof (struct tr_mac_frm_nori);
2136 nmh = (struct tr_mac_frm_nori *)(nmp->b_rptr);
2137 nmh->tr_ac = 0x10;
2138 nmh->tr_fc = 0x40;
2139 mac_copy(dhost, nmh->tr_dhost, macinfo->gldm_addrlen);
2140 /*
2141 * access the mac address without a mutex - take a risk -
2142 * to prevent mutex contention - BUG 4211361
2143 */
2144 mac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
2145 nmh->tr_shost, macinfo->gldm_addrlen);
2147 if (srpresent)
2148 nmh->tr_shost[0] |= 0x80;
2149 else
2150 nmh->tr_shost[0] &= ~0x80;
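/*
 * The high-order bit of the source address is the 802.5 routing
 * information indicator (RII): set when a RIF follows the MAC
 * addresses, clear for a non-source-routed (NSR) frame.
 */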
2152 /*
2153 * Packet assembled; send it.
2155 * Since we own the SR_MUTEX, we don't want to take the maclock
2156 * mutex (since they are acquired in the opposite order on the
2157 * receive path, so deadlock could occur). We could rearrange
2158 * the code in gld_get_route() and drop the SR_MUTEX around the
2159 * call to gld_rde_pdu_req(), but that's kind of ugly. Rather,
2160 * we just refrain from calling gld_start() from here, and
2161 * instead just queue the packet for wsrv to send next. Besides,
2162 * it's more important to get the packet we're working on out
2163 * quickly than this RQC.
2164 */
2165 (void) putbq(WR(q), nmp);
2166 qenable(WR(q));
2167 }
2169 /*
2170 * Route Determination Component (RDC)
2172 * We do not implement separate routes for each SAP, as specified by
2173 * ISO 8802-2; instead we implement only one route per remote mac address.
2174 */
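/*
 * gld_get_route() below effectively runs a small per-destination state
 * machine over the srtab entry:
 *
 *	no entry		create one, send RQC, return no route;
 *	sr_ri.len == 0		RQC outstanding; resend it once rde_timeout
 *				has elapsed, return no route;
 *	entry older than	treat as stale: send a fresh RQC, clear the
 *	rde_timeout		cached route, return no route;
 *	sr_ri.len == 2		destination is on the local ring, no RIF
 *				needed;
 *	otherwise		return the cached RIF.
 */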
2175 static void
2176 gld_get_route(gld_mac_info_t *macinfo, queue_t *q, uchar_t *dhost,
2177 struct gld_ri **rhp, uchar_t dsap, uchar_t ssap)
2179 struct srtab *sr;
2180 clock_t t = ddi_get_lbolt();
2182 ASSERT(mutex_owned(GLD_SR_MUTEX(macinfo)));
2184 sr = gld_sr_lookup_entry(macinfo, dhost);
2186 if (sr == NULL) {
2187 /*
2188 * we have no entry -- never heard of this address:
2189 * create an empty entry and initiate RQC
2190 */
2191 sr = gld_sr_create_entry(macinfo, dhost);
2192 gld_rde_pdu_req(macinfo, q, dhost, (struct gld_ri *)NULL,
2193 dsap, ssap, RDE_RQC);
2194 if (sr)
2195 sr->sr_timer = t;
2196 *rhp = NULL; /* we have no route yet */
2197 return;
2200 /* we have an entry; see if we know a route yet */
2202 if (sr->sr_ri.len == 0) {
2203 /* Have asked RQC, but no reply (yet) */
2204 if (t - sr->sr_timer >
2205 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_timeout) {
2206 /* RQR overdue, resend RQC */
2207 gld_rde_pdu_req(macinfo, q, dhost,
2208 (struct gld_ri *)NULL, dsap, ssap, RDE_RQC);
2209 sr->sr_timer = t;
2211 *rhp = NULL; /* we have no route yet */
2212 return;
2215 /* we know a route, or it's local */
2217 /* if it might be stale, reset and get a new one */
2218 if (t - sr->sr_timer >
2219 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->rde_timeout) {
2220 gld_rde_pdu_req(macinfo, q, dhost,
2221 (struct gld_ri *)NULL, dsap, ssap, RDE_RQC);
2222 sr->sr_ri.len = 0;
2223 sr->sr_timer = t;
2224 *rhp = NULL; /* we have no route */
2225 return;
2228 if (sr->sr_ri.len == 2) {
2229 /* the remote site is on our local ring -- no route needed */
2230 *rhp = NULL;
2231 return;
2234 *rhp = &sr->sr_ri; /* we have a route, return it */
2235 }
2237 /*
2238 * zap the specified entry and reinitiate RQC
2239 */
2240 static void
2241 gld_reset_route(gld_mac_info_t *macinfo, queue_t *q,
2242 uchar_t *dhost, uchar_t dsap, uchar_t ssap)
2244 struct srtab *sr;
2246 ASSERT(mutex_owned(GLD_SR_MUTEX(macinfo)));
2248 sr = gld_sr_create_entry(macinfo, dhost);
2249 gld_rde_pdu_req(macinfo, q, dhost, (struct gld_ri *)NULL,
2250 dsap, ssap, RDE_RQC);
2251 if (sr == NULL)
2252 return;
2254 sr->sr_ri.len = 0;
2255 sr->sr_timer = ddi_get_lbolt();
2256 }
2258 /*
2259 * This routine is called when an RDE PDU is received from our peer.
2260 * If it is an RS (Route Selected) PDU, we adopt the specified route.
2261 * If it is an RQR (reply to our previous RQC), we evaluate the
2262 * specified route in comparison with our current known route, if any,
2263 * and we keep the "better" of the two routes (the one with the shorter RIF).
2264 */
2265 static void
2266 gld_rde_pdu_ind(gld_mac_info_t *macinfo, struct gld_ri *rh, struct rde_pdu *pdu,
2267 int pdulen)
2269 struct srtab *sr;
2270 uchar_t *otherhost;
2272 if (pdulen < sizeof (struct rde_pdu))
2273 return; /* Bad incoming PDU */
2275 if (pdu->rde_ptype == RDE_RQC)
2276 return; /* ignore RQC */
2278 if (pdu->rde_ptype != RDE_RQR && pdu->rde_ptype != RDE_RS) {
2279 #ifdef GLD_DEBUG
2280 if (gld_debug & GLDERRS)
2281 cmn_err(CE_WARN, "gld: bogus RDE ptype 0x%x received",
2282 pdu->rde_ptype);
2283 #endif
2284 return;
2287 if (rh == NULL) {
2288 #ifdef GLD_DEBUG
2289 if (gld_debug & GLDERRS)
2290 cmn_err(CE_WARN,
2291 "gld: bogus NULL RIF, ptype 0x%x received",
2292 pdu->rde_ptype);
2293 #endif
2294 return;
2297 ASSERT(rh->len >= 2);
2298 ASSERT(rh->len <= sizeof (struct gld_ri));
2299 ASSERT((rh->len & 1) == 0);
2301 if (pdu->rde_ptype == RDE_RQR) {
2302 /* A reply to our RQC has its address as target mac */
2303 otherhost = pdu->rde_target_mac;
2304 } else {
2305 ASSERT(pdu->rde_ptype == RDE_RS);
2306 /* An RS has its address as orig mac */
2307 otherhost = pdu->rde_orig_mac;
2310 mutex_enter(GLD_SR_MUTEX(macinfo));
2312 if ((sr = gld_sr_create_entry(macinfo, otherhost)) == NULL) {
2313 mutex_exit(GLD_SR_MUTEX(macinfo));
2314 return; /* oh well, out of memory */
2317 if (pdu->rde_ptype == RDE_RQR) {
2318 /* see if new route is better than what we may already have */
2319 if (sr->sr_ri.len != 0 &&
2320 sr->sr_ri.len <= rh->len) {
2321 mutex_exit(GLD_SR_MUTEX(macinfo));
2322 return; /* we have one, and new one is no shorter */
2326 /* adopt the new route */
2327 bcopy((caddr_t)rh, (caddr_t)&sr->sr_ri, rh->len); /* copy incom RIF */
2328 sr->sr_ri.rt = RT_SRF; /* make it a clean SRF */
2329 sr->sr_ri.dir ^= 1; /* reverse direction */
2330 sr->sr_timer = ddi_get_lbolt();
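/*
 * The RIF just copied was recorded as the peer built it; flipping the
 * direction bit tells bridges to walk the route descriptors in the
 * opposite order, so the same descriptors now route frames from us
 * back toward the peer.
 */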
2332 mutex_exit(GLD_SR_MUTEX(macinfo));
2333 }
2335 /*
2336 * This routine is called when a packet with a RIF is received. Our
2337 * policy is to adopt the route.
2338 */
2339 /* ARGSUSED3 */
2340 static void
2341 gld_rif_ind(gld_mac_info_t *macinfo, struct gld_ri *rh, uchar_t *shost,
2342 uchar_t ssap, uchar_t dsap)
2344 struct srtab *sr;
2346 ASSERT(rh != NULL); /* ensure RIF */
2347 ASSERT((rh->rt & 0x04) == 0); /* ensure SRF */
2348 ASSERT(rh->len >= 2);
2349 ASSERT(rh->len <= sizeof (struct gld_ri));
2350 ASSERT((rh->len & 1) == 0);
2352 mutex_enter(GLD_SR_MUTEX(macinfo));
2354 if ((sr = gld_sr_create_entry(macinfo, shost)) == NULL) {
2355 mutex_exit(GLD_SR_MUTEX(macinfo));
2356 return; /* oh well, out of memory */
2359 /* we have an entry; fill it in */
2360 bcopy((caddr_t)rh, (caddr_t)&sr->sr_ri, rh->len); /* copy incom RIF */
2361 sr->sr_ri.rt = RT_SRF; /* make it a clean SRF */
2362 sr->sr_ri.dir ^= 1; /* reverse direction */
2363 sr->sr_timer = ddi_get_lbolt();
2365 mutex_exit(GLD_SR_MUTEX(macinfo));
2368 static struct srtab **
2369 gld_sr_hash(struct srtab **sr_hash_tbl, uchar_t *addr, int addr_length)
2371 uint_t hashval = 0;
2373 while (--addr_length >= 0)
2374 hashval ^= *addr++;
2376 return (&sr_hash_tbl[hashval % SR_HASH_SIZE]);
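/*
 * The bucket is simply the XOR of all the address bytes, folded into
 * the table.  For example, address 10:00:5a:c9:24:7f hashes to bucket
 * (0x10 ^ 0x00 ^ 0x5a ^ 0xc9 ^ 0x24 ^ 0x7f) % SR_HASH_SIZE, i.e.
 * 0xd8 % SR_HASH_SIZE.
 */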
2379 static struct srtab *
2380 gld_sr_lookup_entry(gld_mac_info_t *macinfo, uchar_t *macaddr)
2382 struct srtab *sr;
2384 ASSERT(mutex_owned(GLD_SR_MUTEX(macinfo)));
2386 for (sr = *gld_sr_hash(GLD_SR_HASH(macinfo), macaddr,
2387 macinfo->gldm_addrlen); sr; sr = sr->sr_next)
2388 if (mac_eq(macaddr, sr->sr_mac, macinfo->gldm_addrlen))
2389 return (sr);
2391 return ((struct srtab *)0);
2394 static struct srtab *
2395 gld_sr_create_entry(gld_mac_info_t *macinfo, uchar_t *macaddr)
2397 struct srtab *sr;
2398 struct srtab **srp;
2400 ASSERT(!(macaddr[0] & 0x80)); /* no group addresses here */
2401 ASSERT(mutex_owned(GLD_SR_MUTEX(macinfo)));
2403 srp = gld_sr_hash(GLD_SR_HASH(macinfo), macaddr, macinfo->gldm_addrlen);
2405 for (sr = *srp; sr; sr = sr->sr_next)
2406 if (mac_eq(macaddr, sr->sr_mac, macinfo->gldm_addrlen))
2407 return (sr);
2409 if (!(sr = kmem_zalloc(sizeof (struct srtab), KM_NOSLEEP))) {
2410 #ifdef GLD_DEBUG
2411 if (gld_debug & GLDERRS)
2412 cmn_err(CE_WARN,
2413 "gld: gld_sr_create_entry kmem_zalloc failed");
2414 #endif
2415 return ((struct srtab *)0);
2418 bcopy((caddr_t)macaddr, (caddr_t)sr->sr_mac, macinfo->gldm_addrlen);
2420 sr->sr_next = *srp;
2421 *srp = sr;
2422 return (sr);
2425 static void
2426 gld_sr_clear(gld_mac_info_t *macinfo)
2428 int i;
2429 struct srtab **sr_hash_tbl = GLD_SR_HASH(macinfo);
2430 struct srtab **srp, *sr;
2432 /*
2433 * Walk through the table, deleting all entries.
2435 * Only called from uninit, so don't need the mutex.
2436 */
2437 for (i = 0; i < SR_HASH_SIZE; i++) {
2438 for (srp = &sr_hash_tbl[i]; (sr = *srp) != NULL; ) {
2439 *srp = sr->sr_next;
2440 kmem_free((char *)sr, sizeof (struct srtab));
2445 #ifdef DEBUG
2446 void
2447 gld_sr_dump(gld_mac_info_t *macinfo)
2449 int i, j;
2450 struct srtab **sr_hash_tbl;
2451 struct srtab *sr;
2453 sr_hash_tbl = GLD_SR_HASH(macinfo);
2454 if (sr_hash_tbl == NULL)
2455 return;
2457 mutex_enter(GLD_SR_MUTEX(macinfo));
2459 /*
2460 * Walk through the table, printing all entries
2461 */
2462 cmn_err(CE_NOTE, "GLD Source Routing Table (0x%p):", (void *)macinfo);
2463 cmn_err(CE_CONT, "Addr len,rt,dir,mtu,res rng,brg0 rng,brg1...\n");
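/*
 * With the format strings below, each entry prints as, e.g.:
 *	10:0:5a:c9:24:7f 6,0,1,3,0 12 34
 * i.e. the mac address, then len,rt,dir,mtu,res from the cached RIF,
 * then the 16-bit route descriptors (two of them when len is 6).
 */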
2464 for (i = 0; i < SR_HASH_SIZE; i++) {
2465 for (sr = sr_hash_tbl[i]; sr; sr = sr->sr_next) {
2466 cmn_err(CE_CONT,
2467 "%x:%x:%x:%x:%x:%x %d,%x,%x,%x,%x ",
2468 sr->sr_mac[0], sr->sr_mac[1], sr->sr_mac[2],
2469 sr->sr_mac[3], sr->sr_mac[4], sr->sr_mac[5],
2470 sr->sr_ri.len, sr->sr_ri.rt, sr->sr_ri.dir,
2471 sr->sr_ri.mtu, sr->sr_ri.res);
2472 if (sr->sr_ri.len)
2473 for (j = 0; j < (sr->sr_ri.len - 2) / 2; j++)
2474 cmn_err(CE_CONT, "%x ",
2475 REF_NET_USHORT(*(unsigned short *)
2476 &sr->sr_ri.rd[j]));
2477 cmn_err(CE_CONT, "\n");
2481 mutex_exit(GLD_SR_MUTEX(macinfo));
2483 #endif