3 #include <netinet/in_systm.h>
4 #include <netinet/ip.h>
5 #include <netinet/ip_icmp.h>
6 #include <netinet/udp.h>
/*
 * Base addresses of the packet and continuation-buffer arrays handed
 * back by osi_Alloc (see rxi_MorePackets / rxi_MoreCbufs), retained so
 * rxi_FreeAllPackets can osi_Free/UNPIN them later.
 */
struct rx_packet *rx_mallocedP = NULL;
struct rx_cbuf *rx_mallocedC = NULL;
13 /* string to send to rxdebug */
14 #define CML_VERSION_NUMBER_SIZE 65
15 static char cml_version_number
[CML_VERSION_NUMBER_SIZE
]= PACKAGE
"-" VERSION
;
/* Optional output-tracer hook: rxi_SendPacket calls it with the packet
 * and destination address just before transmission; it may modify its
 * arguments, and a non-zero return drops the packet (see rxi_SendPacket). */
extern int (*rx_almostSent) ();
/*
 * Serialize the 32-bit value `u` at *p in network (big-endian) byte
 * order, then advance *p past the four bytes written so that the
 * back-to-back put32(&buf, ...) calls in rxi_EncodePacketHeader lay
 * down consecutive header words.  (The advance is reconstructed: the
 * visible fragment omitted it, but the encode loop depends on it.)
 */
static void
put32(unsigned char **p, uint32_t u)
{
    (*p)[0] = (u >> 24) & 0xFF;
    (*p)[1] = (u >> 16) & 0xFF;
    (*p)[2] = (u >> 8) & 0xFF;
    (*p)[3] = (u >> 0) & 0xFF;
    *p += 4;
}
/*
 * Deserialize a 32-bit value in network (big-endian) byte order from
 * *p, advancing *p past the four bytes read (so the back-to-back
 * get32(&buf) calls in rxi_DecodePacketHeader read consecutive words).
 * Each byte is widened to uint32_t BEFORE shifting: in the original
 * expression the unsigned char promotes to (signed) int, and shifting
 * a byte >= 0x80 left by 24 is undefined behavior.
 */
static uint32_t
get32(unsigned char **p)
{
    uint32_t u = ((uint32_t)(*p)[0] << 24)
               | ((uint32_t)(*p)[1] << 16)
               | ((uint32_t)(*p)[2] << 8)
               | (uint32_t)(*p)[3];
    *p += 4;
    return u;
}
/*
 * Drain one datagram from the raw ICMP socket and, when it is a
 * "destination unreachable" error quoting one of our own UDP packets,
 * tear down the connection to that peer via rxi_KillConnection.
 * Expected layout: 20-byte outer IP header (0x45 => no options),
 * 8-byte ICMP header, then the quoted inner IP header + first 8 bytes
 * of the offending UDP datagram.
 */
41 rxi_ReadIcmp(osi_socket asocket
)
43 unsigned char buffer
[2000];
49 length
= recv(asocket
, buffer
, sizeof(buffer
), 0);
/* NOTE(review): if `length` is a signed int, `length > sizeof(buffer)`
 * compares signed against size_t -- confirm its declaration (not
 * visible here) makes this safe. */
50 if (length
< 0 || length
> sizeof(buffer
))
/* Too short to hold outer IP + ICMP + quoted IP + 8 bytes: ignore. */
55 if (length
< (5 * 4) + 8 + (5 * 4) + 8)
57 if (*p
!= 0x45) /* header comes with options, punt */
61 #define ICMP_UNREACH 3
63 #ifndef ICMP_UNREACH_NEEDFRAG
64 #define ICMP_UNREACH_NEEDFRAG 4
/* ICMP type byte sits immediately after the 20-byte outer IP header. */
66 if (p
[5 * 4] != ICMP_UNREACH
)
/* NOTE(review): the ICMP code byte conventionally lives at (5*4)+1,
 * not 6*4 == 24 -- verify this offset against the original source. */
68 if (p
[6 * 4] == ICMP_UNREACH_NEEDFRAG
) /* ignore need frag for now */
70 if (p
[(5 * 4) + 8] != 0x45) /* header comes with options, punt */
/* Step into the quoted inner datagram; the exact offsets (and where
 * the peer host `h` is picked up) depend on lines not visible in this
 * fragment -- verify against the original. */
73 p
= &p
[(5 * 4) + 8 + (4 * 4)];
75 port
= (p
[2] << 8) | p
[3];
77 rxi_KillConnection(h
, port
);
81 * some rules about packets:
82 * 1. When a packet is allocated, the final iov_buf contains room for
83 * a security trailer, but iov_len masks that fact. If the security
84 * package wants to add the trailer, it may do so, and then extend
85 * iov_len appropriately. For this reason, packet's niovecs and
86 * iov_len fields should be accurate before calling PreparePacket.
91 * all packet buffers (iov_base) are integral multiples of
93 * offset is an integral multiple of the word size.
/*
 * Slow-path read of the 32-bit word `offset` bytes into the packet's
 * data area: walk wirevec[1..niovecs-1] (vec 0 is the wire header)
 * until the iovec covering `offset` is found, then dereference into
 * it.  Per the invariants noted above, iov_base and offset are
 * word-aligned, so the direct uint32_t* dereference is safe.
 */
97 rx_SlowGetLong(struct rx_packet
*packet
, int offset
)
/* l accumulates how many data bytes the already-skipped iovecs cover. */
101 for (l
= 0, i
= 1; i
< packet
->niovecs
; i
++) {
102 if (l
+ packet
->wirevec
[i
].iov_len
> offset
) {
/* This iovec covers `offset`: read at its relative position. */
103 return *((uint32_t *)
104 ((char *)packet
->wirevec
[i
].iov_base
+ (offset
- l
)));
106 l
+= packet
->wirevec
[i
].iov_len
;
113 * all packet buffers (iov_base) are integral multiples of the word
115 * offset is an integral multiple of the word size.
/*
 * Slow-path store of the 32-bit word `data` at byte `offset` of the
 * packet's data area; mirror image of rx_SlowGetLong above, relying on
 * the same word-alignment invariants.
 */
118 rx_SlowPutLong(struct rx_packet
*packet
, int offset
, uint32_t data
)
/* Skip whole iovecs (starting at 1; vec 0 is the wire header) until
 * the one containing `offset` is reached. */
122 for (l
= 0, i
= 1; i
< packet
->niovecs
; i
++) {
123 if (l
+ packet
->wirevec
[i
].iov_len
> offset
) {
/* Store into this iovec at its relative offset (offset - l). */
124 *((uint32_t *) ((char *)packet
->wirevec
[i
].iov_base
+ (offset
- l
))) = data
;
127 l
+= packet
->wirevec
[i
].iov_len
;
134 * all packet buffers (iov_base) are integral multiples of the
136 * offset is an integral multiple of the word size.
138 * all buffers are contiguously arrayed in the iovec from 0..niovecs-1
141 rx_SlowReadPacket(struct rx_packet
*packet
, int offset
, int resid
, void *out
)
144 unsigned char *p
= out
;
147 for(i
= 1; (i
< packet
->niovecs
) && (offset
+ (ssize_t
)resid
> 0); i
++) {
148 if(offset
< packet
->wirevec
[i
].iov_len
) {
149 /* at this point the intersection of this iovec and
150 [offset, offset+resid) is non-empty, so we can copy
151 min(base + len, base + offset + resid) -
152 max(base, base + offset) bytes
154 bytes
= min(packet
->wirevec
[i
].iov_len
, offset
+ resid
) -
157 (char *)packet
->wirevec
[i
].iov_base
+ max(offset
, 0),
161 offset
-= packet
->wirevec
[i
].iov_len
;
163 return p
- (unsigned char *)out
;
169 * all packet buffers (iov_base) are integral multiples of the
171 * offset is an integral multiple of the word size.
174 rx_SlowWritePacket(struct rx_packet
*packet
, int offset
, int resid
, void *in
)
177 unsigned char *p
= in
;
180 for(i
= 1; i
< RX_MAXWVECS
&& offset
+ resid
> 0; i
++) {
181 if(i
>= packet
->niovecs
)
182 if(rxi_AllocDataBuf(packet
, resid
))
184 if(offset
< packet
->wirevec
[i
].iov_len
) {
185 /* at this point the intersection of this iovec and
186 [offset, offset+resid) is non-empty, so we can copy
187 min(base + len, base + offset + resid) -
188 max(base, base + offset) bytes
190 bytes
= min(packet
->wirevec
[i
].iov_len
, offset
+ resid
) -
192 memcpy((char *)(packet
->wirevec
[i
].iov_base
) + max(offset
, 0),
196 offset
-= packet
->wirevec
[i
].iov_len
;
198 return p
- (unsigned char *)in
;
/*
 * Return continuation buffer `c` to the global free list
 * (rx_freeCbufQueue), under rx_freePktQ_lock.  Counterpart of
 * allocCBuf below.
 */
203 freeCBuf(struct rx_cbuf
*c
)
207 dpf(("Free cbuf %x\n", c
));
210 MObtainWriteLock(&rx_freePktQ_lock
);
212 queue_Append(&rx_freeCbufQueue
, c
);
215 MReleaseWriteLock(&rx_freePktQ_lock
);
/*
 * Pop one continuation buffer off rx_freeCbufQueue (under
 * rx_freePktQ_lock).  If the queue is empty it flags
 * rxi_NeedMoreCbufs and calls rxi_MoreCbufs(rx_Window) to replenish
 * the pool.  Callers (rxi_AllocDataBuf) check for NULL, so presumably
 * this can still fail -- the failure path is not visible in this
 * fragment; confirm against the original.
 */
221 static struct rx_cbuf
*
229 MObtainWriteLock(&rx_freePktQ_lock
);
231 if (queue_IsEmpty(&rx_freeCbufQueue
)) {
234 rxi_NeedMoreCbufs
= TRUE
;
237 rxi_MoreCbufs(rx_Window
);
241 c
= queue_First(&rx_freeCbufQueue
, rx_cbuf
);
243 dpf(("Alloc cb %x\n", c
));
250 MReleaseWriteLock(&rx_freePktQ_lock
);
256 /* Allocate more CBufs iff we need them */
258 * In kernel, can't page in memory with interrupts disabled, so we
259 * don't use the event mechanism.
/*
 * Periodic event handler: replenish the cbuf pool if allocCBuf has
 * flagged rxi_NeedMoreCbufs, then re-arm itself RX_CBUF_TIME seconds
 * from `now` via rxevent_Post.
 */
262 rx_CheckCbufs(unsigned long when
)
263 /* time when I should be called next */
269 if (rxi_NeedMoreCbufs
) {
270 rxi_MoreCbufs(rx_Window
);
/* Schedule the next self-check; now.sec is also smuggled through as
 * the opaque event argument. */
273 now
.sec
+= RX_CBUF_TIME
;
274 rxevent_Post(&now
, rx_CheckCbufs
, (void *)now
.sec
, NULL
);
279 * this one is kind of awful.
280 * In rxkad, the packet has been all shortened, and everything, ready for
281 * sending. All of a sudden, we discover we need some of that space back.
282 * This isn't terribly general, because it knows that the packets are only
283 * rounded up to the EBS (userdata + security header).
286 rxi_RoundUpPacket(struct rx_packet
*p
, unsigned int nb
)
291 if (p
->wirevec
[i
].iov_base
== (caddr_t
) p
->localdata
) {
292 if (p
->wirevec
[i
].iov_len
<= RX_FIRSTBUFFERSIZE
- nb
) {
293 p
->wirevec
[i
].iov_len
+= nb
;
297 if (p
->wirevec
[i
].iov_len
<= RX_CBUFFERSIZE
- nb
) {
298 p
->wirevec
[i
].iov_len
+= nb
;
306 /* get sufficient space to store nb bytes of data (or more), and hook
307 * it into the supplied packet. Return nbytes<=0 if successful, otherwise
308 * returns the number of bytes >0 which it failed to come up with.
309 * Don't need to worry about locking on packet, since only
310 * one thread can manipulate one at a time. Locking on cbufs is handled
312 /* MTUXXX don't need to go through the for loop if we can trust niovecs */
314 rxi_AllocDataBuf(struct rx_packet
*p
, int nb
)
318 for (i
= 0; nb
> 0 && i
< RX_MAXWVECS
; i
++) {
319 if (p
->wirevec
[i
].iov_base
)
324 p
->wirevec
[i
].iov_len
= RX_FIRSTBUFFERSIZE
;
325 p
->wirevec
[i
].iov_base
= (caddr_t
) p
->localdata
;
326 nb
-= RX_FIRSTBUFFERSIZE
;
327 p
->length
+= RX_FIRSTBUFFERSIZE
;
333 if ((cb
= allocCBuf()) != NULL
) {
334 p
->wirevec
[i
].iov_base
= (caddr_t
) cb
->data
;
335 p
->wirevec
[i
].iov_len
= RX_CBUFFERSIZE
;
336 nb
-= RX_CBUFFERSIZE
;
337 p
->length
+= RX_CBUFFERSIZE
;
350 rxi_FreeDataBufs(struct rx_packet
*p
, int first
)
354 if (first
!= 1) /* MTUXXX */
355 osi_Panic("FreeDataBufs 1: first must be 1");
357 for (i
= first
; i
< RX_MAXWVECS
; i
++) {
358 if (p
->wirevec
[i
].iov_base
) {
359 if (p
->wirevec
[i
].iov_base
!= (caddr_t
) p
->localdata
) {
360 freeCBuf((struct rx_cbuf
*)((char *)p
->wirevec
[i
].iov_base
-
361 sizeof(struct rx_queue
)));
363 p
->wirevec
[i
].iov_base
= NULL
;
364 } else if (i
== 1) /* MTUXXX */
365 osi_Panic("FreeDataBufs 4: vec 1 must not be NULL");
367 p
->wirevec
[i
].iov_len
= 0;
375 * add n more fragment buffers (continuation buffers)
376 * Must be called at user priority or will crash RS/6000s
381 struct rx_cbuf
*c
, *e
;
389 getme
= n
* sizeof(struct rx_cbuf
);
390 c
= rx_mallocedC
= (struct rx_cbuf
*) osi_Alloc(getme
);
396 PIN(c
, getme
); /* XXXXX */
398 MObtainWriteLock(&rx_freePktQ_lock
);
400 for (e
= c
+ n
; c
< e
; c
++) {
401 queue_Append(&rx_freeCbufQueue
, c
);
403 rxi_NeedMoreCbufs
= FALSE
;
407 MReleaseWriteLock(&rx_freePktQ_lock
);
413 /* Add more packet buffers */
415 rxi_MorePackets(int apackets
)
417 struct rx_packet
*p
, *e
;
422 getme
= apackets
* sizeof(struct rx_packet
);
423 p
= rx_mallocedP
= (struct rx_packet
*) osi_Alloc(getme
);
425 PIN(p
, getme
); /* XXXXX */
426 memset((char *) p
, 0, getme
);
428 MObtainWriteLock(&rx_freePktQ_lock
);
430 for (e
= p
+ apackets
; p
< e
; p
++) {
431 p
->wirevec
[0].iov_base
= (char *) (p
->wirehead
);
432 p
->wirevec
[0].iov_len
= RX_HEADER_SIZE
;
433 p
->wirevec
[1].iov_base
= (char *) (p
->localdata
);
434 p
->wirevec
[1].iov_len
= RX_FIRSTBUFFERSIZE
;
437 queue_Append(&rx_freePacketQueue
, p
);
439 rx_nFreePackets
+= apackets
;
441 MReleaseWriteLock(&rx_freePktQ_lock
);
445 * allocate enough cbufs that 1/4 of the packets will be able to hold
446 * maximal amounts of data
448 /* MTUXXX enable this -- currently disabled for testing
449 rxi_MoreCbufs((apackets/4)*(rx_maxReceiveSize - RX_FIRSTBUFFERSIZE)/RX_CBUFFERSIZE);
/*
 * Release and unpin the packet array allocated by rxi_MorePackets.
 * NOTE(review): rxi_MorePackets allocates apackets * sizeof(struct
 * rx_packet), while this frees (rx_Window + 2) * sizeof -- correct
 * only if the sole caller of rxi_MorePackets passed rx_Window + 2;
 * confirm before trusting this size.
 */
455 rxi_FreeAllPackets(void)
457 /* must be called at proper interrupt level, etcetera */
458 /* MTUXXX need to free all Cbufs */
459 osi_Free(rx_mallocedP
, (rx_Window
+ 2) * sizeof(struct rx_packet
));
460 UNPIN(rx_mallocedP
, (rx_Window
+ 2) * sizeof(struct rx_packet
));
467 * In the packet freeing routine below, the assumption is that
468 * we want all of the packets to be used equally frequently, so that we
469 * don't get packet buffers paging out. It would be just as valid to
470 * assume that we DO want them to page out if not many are being used.
471 * In any event, we assume the former, and append the packets to the end
475 * This explanation is bogus. The free list doesn't remain in any kind of
476 * useful order for long: the packets in use get pretty much randomly scattered
477 * across all the pages. In order to permit unused {packets,bufs} to page
478 * out, they must be stored so that packets which are adjacent in memory are
479 * adjacent in the free list. An array springs rapidly to mind.
483 * Free the packet p. P is assumed not to be on any queue, i.e.
484 * remove it yourself first if you call this routine.
/*
 * Return packet `p` to the global free packet queue.  The caller must
 * have removed it from any other queue first.  Continuation buffers
 * are released before rx_freePktQ_lock is taken here, because
 * rxi_FreeDataBufs acquires that lock itself.
 */
487 rxi_FreePacket(struct rx_packet
*p
)
490 dpf(("Free %x\n", p
));
492 rxi_FreeDataBufs(p
, 1); /* this gets the locks below, so must
497 MObtainWriteLock(&rx_freePktQ_lock
);
500 queue_Append(&rx_freePacketQueue
, p
);
501 /* Wakeup anyone waiting for packets */
504 MReleaseWriteLock(&rx_freePktQ_lock
);
511 * rxi_AllocPacket sets up p->length so it reflects the number of
512 * bytes in the packet at this point, **not including** the header.
513 * The header is absolutely necessary, besides, this is the way the
514 * length field is usually used
517 rxi_AllocPacket(int class)
521 if (rxi_OverQuota(class)) {
522 rx_stats
.noPackets
[class]++;
525 rx_stats
.packetRequests
++;
527 MObtainWriteLock(&rx_freePktQ_lock
);
529 if (queue_IsEmpty(&rx_freePacketQueue
))
530 osi_Panic("rxi_AllocPacket error");
532 p
= queue_First(&rx_freePacketQueue
, rx_packet
);
534 dpf(("Alloc %x, class %d\n",
535 queue_First(&rx_freePacketQueue
, rx_packet
), class));
538 MReleaseWriteLock(&rx_freePktQ_lock
);
541 * have to do this here because rx_FlushWrite fiddles with the iovs in
542 * order to truncate outbound packets. In the near future, may need to
543 * allocate bufs from a static pool here, and/or in AllocSendPacket
545 p
->wirevec
[0].iov_base
= (char *) (p
->wirehead
);
546 p
->wirevec
[0].iov_len
= RX_HEADER_SIZE
;
547 p
->wirevec
[1].iov_base
= (char *) (p
->localdata
);
548 p
->wirevec
[1].iov_len
= RX_FIRSTBUFFERSIZE
;
550 p
->length
= RX_FIRSTBUFFERSIZE
;
556 * This guy comes up with as many buffers as it {takes,can get} given
557 * the MTU for this call. It also sets the packet length before
558 * returning. caution: this is often called at NETPRI
561 rxi_AllocSendPacket(struct rx_call
*call
, int want
)
563 struct rx_packet
*p
= (struct rx_packet
*) 0;
567 mud
= call
->conn
->maxPacketSize
- RX_HEADER_SIZE
;
569 while (!(call
->error
)) {
570 /* if an error occurred, or we get the packet we want, we're done */
571 if ((p
= rxi_AllocPacket(RX_PACKET_CLASS_SEND
)) != NULL
) {
573 want
+= rx_GetSecurityHeaderSize(rx_ConnectionOf(call
)) +
574 rx_GetSecurityMaxTrailerSize(rx_ConnectionOf(call
));
575 want
= MIN(want
, mud
);
577 if (want
> p
->length
)
578 (void) rxi_AllocDataBuf(p
, (want
- p
->length
));
583 p
->length
-= rx_GetSecurityHeaderSize(rx_ConnectionOf(call
)) +
584 rx_GetSecurityMaxTrailerSize(rx_ConnectionOf(call
));
586 if (p
->length
<= 0) {
594 * no error occurred, and we didn't get a packet, so we sleep. At
595 * this point, we assume that packets will be returned sooner or
596 * later, as packets are acknowledged, and so we just wait.
599 RX_MUTEX_ENTER(&rx_waitingForPackets_lock
);
600 rx_waitingForPackets
= 1;
601 call
->flags
|= RX_CALL_WAIT_PACKETS
;
603 #ifdef RX_ENABLE_LOCKS
604 cv_wait(&rx_waitingForPackets_cv
, &rx_waitingForPackets_lock
);
606 osi_rxSleep(&rx_waitingForPackets
);
608 call
->flags
&= ~RX_CALL_WAIT_PACKETS
;
609 RX_MUTEX_EXIT(&rx_waitingForPackets_lock
);
618 /* count the number of used FDs */
627 for (i
= 0; i
< amax
; i
++) {
628 code
= fstat(i
, &tstat
);
636 * This function reads a single packet from the interface into the
637 * supplied packet buffer (*p). Return 0 if the packet is bogus. The
638 * (host,port) of the sender are stored in the supplied variables, and
639 * the data length of the packet is stored in the packet structure.
640 * The header is decoded.
643 rxi_ReadPacket(int asocket
, struct rx_packet
*p
,
644 uint32_t *host
, uint16_t *port
)
646 struct sockaddr_in from
;
652 uint32_t dummy
; /* was using rlen but had aliasing
655 rx_computelen(p
, tlen
);
656 rx_SetDataSize(p
, tlen
); /* this is the size of the user data
659 tlen
+= RX_HEADER_SIZE
; /* now this is the size of the entire
661 rlen
= rx_maxReceiveSize
; /* this is what I am advertising.
662 * Only check it once in order to
666 _tlen
= rxi_AllocDataBuf(p
, _tlen
);
668 _tlen
= rlen
- _tlen
;
673 tlen
=(tlen
>_tlen
)?tlen
:_tlen
;
676 * set up this one iovec for padding, it's just to make sure that the
677 * read doesn't return more data than we expect, and is done to get
678 * around our problems caused by the lack of a length field in the rx
681 p
->wirevec
[p
->niovecs
].iov_base
= (caddr_t
) & dummy
;
682 p
->wirevec
[p
->niovecs
++].iov_len
= 4;
684 memset(&msg
, 0, sizeof(msg
));
685 msg
.msg_name
= (char *) &from
;
686 msg
.msg_namelen
= sizeof(struct sockaddr_in
);
687 msg
.msg_iov
= p
->wirevec
;
688 msg
.msg_iovlen
= p
->niovecs
;
690 msg
.msg_accrights
= NULL
;
691 msg
.msg_accrightslen
= 0;
693 nbytes
= recvmsg(asocket
, &msg
, 0);
695 /* restore the vec to its correct state */
696 p
->wirevec
[--p
->niovecs
].iov_base
= NULL
;
697 p
->wirevec
[p
->niovecs
].iov_len
= 0;
704 p
->length
= (nbytes
- RX_HEADER_SIZE
);
705 if ((nbytes
> tlen
) || (nbytes
< (int)RX_HEADER_SIZE
)) { /* Bogus packet */
707 rxi_MoreCbufs(rx_Window
);
709 rxi_AllocDataBuf(p
, nbytes
- tlen
);
710 else if (nbytes
< 0 && errno
== EWOULDBLOCK
)
711 rx_stats
.noPacketOnRead
++;
713 rx_stats
.bogusPacketOnRead
++;
714 rx_stats
.bogusHost
= from
.sin_addr
.s_addr
;
715 dpf(("B: bogus packet from [%x,%d] nb=%d", from
.sin_addr
.s_addr
,
716 from
.sin_port
, nbytes
));
720 /* Extract packet header. */
721 rxi_DecodePacketHeader(p
);
723 *host
= from
.sin_addr
.s_addr
;
724 *port
= from
.sin_port
;
725 if (p
->header
.type
> 0 && p
->header
.type
< RX_N_PACKET_TYPES
)
726 rx_stats
.packetsRead
[p
->header
.type
- 1]++;
732 /* Send a udp datagram */
734 osi_NetSend(osi_socket asocket
, char *addr
, struct iovec
*dvec
,
735 int nvecs
, int length
)
739 memset(&msg
, 0, sizeof(msg
));
741 msg
.msg_iovlen
= nvecs
;
744 msg
.msg_namelen
= sizeof(struct sockaddr_in
);
746 msg
.msg_accrights
= NULL
;
747 msg
.msg_accrightslen
= 0;
750 while (sendmsg(asocket
, &msg
, 0) == -1) {
754 rx_stats
.sendSelects
++;
755 if (errno
!= EWOULDBLOCK
757 && errno
!= ECONNREFUSED
) {
758 osi_Msg(("rx failed to send packet: %s ", strerror(errno
)));
762 dpf(("rx_send failed with %d\n", errno
));
765 if (asocket
>= FD_SETSIZE
)
766 osi_Panic("osi_NetSend: fd too large");
767 FD_SET(asocket
, &sfds
);
768 while ((error
= select(asocket
+ 1, 0, &sfds
, 0, 0)) != 1) {
769 if (error
>= 0 || errno
!= EINTR
)
770 osi_Panic("osi_NetSend: select error %d.%d", err
, errno
);
779 * osi_NetSend is defined in afs/afs_osinet.c
780 * message receipt is done in rxk_input or rx_put.
785 * Copy an mblock to the contiguous area pointed to by cp.
786 * MTUXXX Supposed to skip <off> bytes and copy <len> bytes,
787 * but it doesn't really.
788 * Returns the number of bytes not transferred.
789 * The message is NOT changed.
792 cpytoc(mblk_t
*mp
, int off
, int len
, char *cp
)
796 for (; mp
&& len
> 0; mp
= mp
->b_cont
) {
797 if (mp
->b_datap
->db_type
!= M_DATA
) {
800 n
= MIN(len
, (mp
->b_wptr
- mp
->b_rptr
));
801 memcpy(cp
, mp
->b_rptr
, n
);
810 * MTUXXX Supposed to skip <off> bytes and copy <len> bytes,
811 * but it doesn't really.
812 * This sucks, anyway, do it like m_cpy.... below
815 cpytoiovec(mblk_t
*mp
, int off
, int len
,
816 struct iovec
*iovs
, int niovs
)
820 for (i
= -1, t
= 0; i
< niovs
&& mp
&& len
> 0; mp
= mp
->b_cont
) {
821 if (mp
->b_datap
->db_type
!= M_DATA
) {
824 n
= MIN(len
, (mp
->b_wptr
- mp
->b_rptr
));
833 memcpy(iovs
[i
].iov_base
+ o
, mp
->b_rptr
, m
);
843 #define m_cpytoc(a, b, c, d) cpytoc(a, b, c, d)
844 #define m_cpytoiovec(a, b, c, d, e) cpytoiovec(a, b, c, d, e)
848 m_cpytoiovec(struct mbuf
*m
, int off
, int len
, struct iovec iovs
[], int niovs
)
851 unsigned int l1
, l2
, i
, t
;
853 if (m
== NULL
|| off
< 0 || len
< 0 || iovs
== NULL
)
854 panic("m_cpytoiovec"); /* MTUXXX probably don't need this
858 if (m
->m_len
<= off
) {
868 p1
= mtod(m
, caddr_t
) + off
;
871 p2
= iovs
[0].iov_base
;
872 l2
= iovs
[0].iov_len
;
875 t
= MIN(l1
, MIN(l2
, (unsigned int) len
));
886 p1
= mtod(m
, caddr_t
);
892 p2
= iovs
[i
].iov_base
;
893 l2
= iovs
[i
].iov_len
;
900 #endif /* AFS_SUN5_ENV */
903 rx_mb_to_packet(char *amb
, void (*free
)(), int hdr_len
, int data_len
,
904 struct rx_packet
*phandle
)
908 code
= m_cpytoiovec(amb
, hdr_len
, data_len
, phandle
->wirevec
,
915 #define CountFDs(amax) amax
920 /* send a response to a debug packet */
923 rxi_ReceiveDebugPacket(struct rx_packet
*ap
, osi_socket asocket
,
924 uint32_t ahost
, uint16_t aport
)
926 struct rx_debugIn tin
;
929 rx_packetread(ap
, 0, sizeof(struct rx_debugIn
), (char *) &tin
);
932 * all done with packet, now set length to the truth, so we can reuse
935 rx_computelen(ap
, ap
->length
);
937 tin
.type
= ntohl(tin
.type
);
938 tin
.index
= ntohl(tin
.index
);
940 case RX_DEBUGI_GETSTATS
:{
941 struct rx_debugStats tstat
;
943 /* get basic stats */
944 memset((char *) &tstat
, 0, sizeof(tstat
)); /* make sure spares are
946 tstat
.version
= RX_DEBUGI_VERSION
;
947 #ifndef RX_ENABLE_LOCKS
948 tstat
.waitingForPackets
= rx_waitingForPackets
;
950 tstat
.nFreePackets
= htonl(rx_nFreePackets
);
951 tstat
.callsExecuted
= htonl(rxi_nCalls
);
952 tstat
.packetReclaims
= htonl(0);
953 tstat
.usedFDs
= CountFDs(64);
954 tstat
.nWaiting
= htonl(rx_nWaiting
);
957 tl
= sizeof(struct rx_debugStats
) - ap
->length
;
959 tl
= rxi_AllocDataBuf(ap
, tl
);
962 rx_packetwrite(ap
, 0, sizeof(struct rx_debugStats
), (char *) &tstat
);
963 ap
->length
= sizeof(struct rx_debugStats
);
964 rxi_SendDebugPacket(ap
, asocket
, ahost
, aport
);
965 rx_computelen(ap
, ap
->length
);
970 case RX_DEBUGI_GETALLCONN
:
971 case RX_DEBUGI_GETCONN
:{
973 struct rx_connection
*tc
;
974 struct rx_call
*tcall
;
975 struct rx_debugConn tconn
;
976 int all
= (tin
.type
== RX_DEBUGI_GETALLCONN
);
979 tl
= sizeof(struct rx_debugConn
) - ap
->length
;
981 tl
= rxi_AllocDataBuf(ap
, tl
);
985 memset((char *) &tconn
, 0, sizeof(tconn
)); /* make sure spares are
987 /* get N'th (maybe) "interesting" connection info */
988 for (i
= 0; i
< rx_hashTableSize
; i
++) {
989 for (tc
= rx_connHashTable
[i
]; tc
; tc
= tc
->next
) {
990 if ((all
|| rxi_IsConnInteresting(tc
)) && tin
.index
-- <= 0) {
991 tconn
.host
= tc
->peer
->host
;
992 tconn
.port
= tc
->peer
->port
;
993 tconn
.cid
= htonl(tc
->cid
);
994 tconn
.epoch
= htonl(tc
->epoch
);
995 tconn
.serial
= htonl(tc
->serial
);
996 for (j
= 0; j
< RX_MAXCALLS
; j
++) {
997 tconn
.callNumber
[j
] = htonl(tc
->callNumber
[j
]);
998 if ((tcall
= tc
->call
[j
]) != NULL
) {
999 tconn
.callState
[j
] = tcall
->state
;
1000 tconn
.callMode
[j
] = tcall
->mode
;
1001 tconn
.callFlags
[j
] = tcall
->flags
;
1002 if (queue_IsNotEmpty(&tcall
->rq
))
1003 tconn
.callOther
[j
] |= RX_OTHER_IN
;
1004 if (queue_IsNotEmpty(&tcall
->tq
))
1005 tconn
.callOther
[j
] |= RX_OTHER_OUT
;
1007 tconn
.callState
[j
] = RX_STATE_NOTINIT
;
1010 tconn
.maxPacketSize
= htonl(tc
->maxPacketSize
);
1011 tconn
.error
= htonl(tc
->error
);
1012 tconn
.flags
= tc
->flags
;
1013 tconn
.type
= tc
->type
;
1014 tconn
.securityIndex
= tc
->securityIndex
;
1015 if (tc
->securityObject
) {
1016 RXS_GetStats(tc
->securityObject
, tc
,
1018 #define DOHTONL(a) (tconn.secStats.a = htonl(tconn.secStats.a))
1019 #define DOHTONS(a) (tconn.secStats.a = htons(tconn.secStats.a))
1022 DOHTONL(packetsReceived
);
1023 DOHTONL(packetsSent
);
1024 DOHTONL(bytesReceived
);
1027 i
< sizeof(tconn
.secStats
.spares
) / sizeof(int16_t);
1031 i
< sizeof(tconn
.secStats
.sparel
) / 4;
1035 rx_packetwrite(ap
, 0, sizeof(struct rx_debugConn
), (char *) &tconn
);
1037 ap
->length
= sizeof(struct rx_debugConn
);
1038 rxi_SendDebugPacket(ap
, asocket
, ahost
, aport
);
1044 /* if we make it here, there are no interesting packets */
1045 tconn
.cid
= htonl(0xffffffff); /* means end */
1046 rx_packetwrite(ap
, 0, sizeof(struct rx_debugConn
), &tconn
);
1048 ap
->length
= sizeof(struct rx_debugConn
);
1049 rxi_SendDebugPacket(ap
, asocket
, ahost
, aport
);
1054 case RX_DEBUGI_RXSTATS
:{
1058 tl
= sizeof(rx_stats
) - ap
->length
;
1060 tl
= rxi_AllocDataBuf(ap
, tl
);
1064 /* Since it's all longs, convert to network order with a loop. */
1065 s
= (uint32_t *) &rx_stats
;
1066 for (i
= 0; i
< sizeof(rx_stats
) / 4; i
++, s
++)
1067 rx_SlowPutLong(ap
, i
* 4, htonl(*s
));
1070 ap
->length
= sizeof(rx_stats
);
1071 rxi_SendDebugPacket(ap
, asocket
, ahost
, aport
);
1077 /* error response packet */
1078 tin
.type
= htonl(RX_DEBUGI_BADTYPE
);
1079 tin
.index
= tin
.type
;
1080 rx_packetwrite(ap
, 0, sizeof(struct rx_debugIn
), &tin
);
1082 ap
->length
= sizeof(struct rx_debugIn
);
1083 rxi_SendDebugPacket(ap
, asocket
, ahost
, aport
);
/*
 * Answer an rxdebug version query: overwrite the start of the received
 * packet `ap` with the fixed-size cml_version_number string and bounce
 * it back to the sender (ahost/aport) via rxi_SendDebugPacket.
 */
1091 rxi_ReceiveVersionPacket(struct rx_packet
*ap
, osi_socket asocket
,
1092 uint32_t ahost
, uint16_t aport
)
1096 rx_packetwrite(ap
, 0, CML_VERSION_NUMBER_SIZE
, cml_version_number
);
1099 rxi_SendDebugPacket(ap
, asocket
, ahost
, aport
);
1105 /* send a debug packet back to the sender */
1107 rxi_SendDebugPacket(struct rx_packet
*apacket
, osi_socket asocket
,
1108 uint32_t ahost
, uint16_t aport
)
1110 struct sockaddr_in taddr
;
1116 taddr
.sin_family
= AF_INET
;
1117 taddr
.sin_port
= aport
;
1118 taddr
.sin_addr
.s_addr
= ahost
;
1120 nbytes
= apacket
->length
;
1122 for (i
= 1; i
< apacket
->niovecs
; i
++) {
1123 if (nbytes
<= apacket
->wirevec
[i
].iov_len
) {
1124 savelen
= apacket
->wirevec
[i
].iov_len
;
1125 saven
= apacket
->niovecs
;
1126 apacket
->wirevec
[i
].iov_len
= nbytes
;
1127 apacket
->niovecs
= i
+ 1;
1128 /* so condition fails because i == niovecs */
1130 nbytes
-= apacket
->wirevec
[i
].iov_len
;
1134 /* debug packets are not reliably delivered, hence the cast below. */
1135 /* MTUXXX need to adjust lengths as in sendSpecial */
1136 (void) osi_NetSend(asocket
, (char *)&taddr
, apacket
->wirevec
,
1137 apacket
->niovecs
, apacket
->length
+ RX_HEADER_SIZE
);
1141 apacket
->wirevec
[i
- 1].iov_len
= savelen
;
1142 apacket
->niovecs
= saven
;
1147 * Send the packet to appropriate destination for the specified
1148 * connection. The header is first encoded and placed in the packet.
1151 rxi_SendPacket(struct rx_connection
*conn
,
1152 struct rx_packet
*p
)
1154 struct sockaddr_in addr
;
1155 struct rx_peer
*peer
= conn
->peer
;
1159 char deliveryType
= 'S';
1163 memset(&addr
, 0, sizeof(addr
));
1165 /* The address we're sending the packet to */
1166 addr
.sin_family
= AF_INET
;
1167 addr
.sin_port
= peer
->port
;
1168 addr
.sin_addr
.s_addr
= peer
->host
;
1171 * This stuff should be revamped, I think, so that most, if not all, of
1172 * the header stuff is always added here. We could probably do away with
1173 * the encode/decode routines. XXXXX
1177 * Stamp each packet with a unique serial number. The serial number is
1178 * maintained on a connection basis because some types of security may be
1179 * based on the serial number of the packet, and security is handled on a
1180 * per authenticated-connection basis.
1184 * Pre-increment, to guarantee no zero serial number; a zero serial
1185 * number means the packet was never sent.
1187 p
->header
.serial
= ++conn
->serial
;
1190 * This is so we can adjust retransmit time-outs better in the face of
1191 * rapidly changing round-trip times. RTO estimation is not a la Karn.
1193 if (p
->firstSerial
== 0) {
1194 p
->firstSerial
= p
->header
.serial
;
1199 * If an output tracer function is defined, call it with the packet and
1200 * network address. Note this function may modify its arguments.
1202 if (rx_almostSent
) {
1203 int drop
= (*rx_almostSent
) (p
, &addr
);
1205 /* drop packet if return value is non-zero? */
1207 deliveryType
= 'D'; /* Drop the packet */
1211 /* Get network byte order header */
1212 rxi_EncodePacketHeader(p
); /* XXX in the event of rexmit, etc,
1213 * don't need to touch ALL the fields */
1216 * Send the packet out on the same socket that related packets are being
1219 asocket
= (conn
->type
== RX_CLIENT_CONNECTION
1220 ? rx_socket
: conn
->service
->socket
);
1223 /* Possibly drop this packet, for testing purposes */
1224 if ((deliveryType
== 'D') ||
1225 ((rx_intentionallyDroppedPacketsPer100
> 0) &&
1226 (random() % 100 < rx_intentionallyDroppedPacketsPer100
))) {
1227 deliveryType
= 'D'; /* Drop the packet */
1229 deliveryType
= 'S'; /* Send the packet */
1230 #endif /* RXDEBUG */
1233 * Loop until the packet is sent. We'd prefer just to use a blocking
1234 * socket, but unfortunately the interface doesn't allow us to have
1235 * the socket block in send mode, and not block in receive mode
1238 if (osi_NetSend(asocket
, (char *)&addr
, p
->wirevec
,
1239 p
->niovecs
, p
->length
+ RX_HEADER_SIZE
)) {
1240 /* send failed, so let's hurry up the resend, eh? */
1241 rx_stats
.netSendFailures
++;
1242 clock_Zero(&p
->retryTime
);
1243 p
->header
.serial
= 0; /* Another way of saying never
1249 dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %x resend %d.%0.3d",
1250 deliveryType
, p
->header
.serial
, rx_packetTypes
[p
->header
.type
- 1],
1251 peer
->host
, peer
->port
, p
->header
.serial
, p
->header
.epoch
,
1252 p
->header
.cid
, p
->header
.callNumber
, p
->header
.seq
, p
->header
.flags
,
1253 p
, p
->retryTime
.sec
, p
->retryTime
.usec
/ 1000));
1255 rx_stats
.packetsSent
[p
->header
.type
- 1]++;
1260 * Send a "special" packet to the peer connection. If call is
1261 * specified, then the packet is directed to a specific call channel
1262 * associated with the connection, otherwise it is directed to the
1263 * connection only. Uses optionalPacket if it is supplied, rather than
1264 * allocating a new packet buffer. Nbytes is the length of the data
1265 * portion of the packet. If data is non-null, nbytes of data are
1266 * copied into the packet. Type is the type of the packet, as defined
1267 * in rx.h. Bug: there's a lot of duplication between this and other
1268 * routines. This needs to be cleaned up.
1271 rxi_SendSpecial(struct rx_call
*call
,
1272 struct rx_connection
*conn
,
1273 struct rx_packet
*optionalPacket
, int type
, char *data
,
1278 * Some of the following stuff should be common code for all packet sends
1279 * (it's repeated elsewhere)
1281 struct rx_packet
*p
;
1285 int channel
, callNumber
;
1288 channel
= call
->channel
;
1289 callNumber
= *call
->callNumber
;
1296 p
= rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL
);
1298 osi_Panic("rxi_SendSpecial failure");
1304 p
->header
.serviceId
= conn
->serviceId
;
1305 p
->header
.securityIndex
= conn
->securityIndex
;
1306 p
->header
.cid
= (conn
->cid
| channel
);
1307 p
->header
.callNumber
= callNumber
;
1309 p
->header
.epoch
= conn
->epoch
;
1310 p
->header
.type
= type
;
1311 p
->header
.flags
= 0;
1312 if (conn
->type
== RX_CLIENT_CONNECTION
)
1313 p
->header
.flags
|= RX_CLIENT_INITIATED
;
1315 rx_packetwrite(p
, 0, nbytes
, data
);
1317 for (i
= 1; i
< p
->niovecs
; i
++) {
1318 if (nbytes
<= p
->wirevec
[i
].iov_len
) {
1319 savelen
= p
->wirevec
[i
].iov_len
;
1321 p
->wirevec
[i
].iov_len
= nbytes
;
1322 p
->niovecs
= i
+ 1; /* so condition fails because i ==
1325 nbytes
-= p
->wirevec
[i
].iov_len
;
1331 rxi_SendPacket(conn
, p
);
1332 if (saven
) { /* means we truncated the packet
1333 * above. We probably don't */
1334 /* really need to do this, but it seems safer this way, given that */
1335 /* sneaky optionalPacket... */
1336 p
->wirevec
[i
- 1].iov_len
= savelen
;
1339 if (!optionalPacket
)
1341 return optionalPacket
;
1344 /* Encode the packet's header (from the struct header in the packet to
1345 * the net byte order representation in the wire representation of the
1346 * packet, which is what is actually sent out on the wire) */
/*
 * Serialize p->header into network byte order at the start of the wire
 * buffer (wirevec[0]), which is what actually goes out on the socket.
 * Each put32(&buf, ...) call advances buf by one 32-bit word -- see
 * put32 above.  Field order here must match rxi_DecodePacketHeader.
 */
1348 rxi_EncodePacketHeader(struct rx_packet
*p
)
1350 unsigned char *buf
= (unsigned char *)p
->wirevec
[0].iov_base
;
1352 memset(buf
, 0, RX_HEADER_SIZE
);
1353 put32(&buf
, p
->header
.epoch
);
1354 put32(&buf
, p
->header
.cid
);
1355 put32(&buf
, p
->header
.callNumber
);
1356 put32(&buf
, p
->header
.seq
);
1357 put32(&buf
, p
->header
.serial
);
/* Pack four byte-wide fields into one word:
 * type | flags | userStatus | securityIndex. */
1359 ((((unsigned long) p
->header
.type
) << 24)
1360 | (((unsigned long) p
->header
.flags
) << 16)
1361 | (p
->header
.userStatus
<< 8) | p
->header
.securityIndex
));
1362 /* Note: top 16 bits of this next word were reserved */
1364 ((p
->header
.spare
<< 16) | (p
->header
.serviceId
& 0xffff)));
1367 /* Decode the packet's header (from net byte order to a struct header) */
/*
 * Parse the network-byte-order wire header at wirevec[0] into
 * p->header.  Exact inverse of rxi_EncodePacketHeader above; each
 * get32(&buf) call consumes one 32-bit word and advances buf.
 */
1369 rxi_DecodePacketHeader(struct rx_packet
*p
)
1371 unsigned char *buf
= (unsigned char *)p
->wirevec
[0].iov_base
;
1374 p
->header
.epoch
= get32(&buf
);
1375 p
->header
.cid
= get32(&buf
);
1376 p
->header
.callNumber
= get32(&buf
);
1377 p
->header
.seq
= get32(&buf
);
1378 p
->header
.serial
= get32(&buf
);
/* Unpack type|flags|userStatus|securityIndex from one word; the
 * assignments to byte-wide header fields discard the high bits. */
1380 /* C will truncate byte fields to bytes for me */
1381 p
->header
.type
= temp
>> 24;
1382 p
->header
.flags
= temp
>> 16;
1383 p
->header
.userStatus
= temp
>> 8;
1384 p
->header
.securityIndex
= temp
>> 0;
1386 p
->header
.serviceId
= (temp
& 0xffff);
1387 p
->header
.spare
= temp
>> 16;
1388 /* Note: top 16 bits of this last word are the security checksum */
/*
 * Fill in p->header for an outgoing DATA packet on `call` (connection
 * identity, next sequence number, flags), mark it never-transmitted,
 * reconcile the iovec lengths with p->length, and finally give the
 * security layer its chance to wrap the packet (RXS_PreparePacket).
 * `last` non-zero marks the final data packet of the call.
 */
1392 rxi_PrepareSendPacket(struct rx_call
*call
,
1393 struct rx_packet
*p
, int last
)
1395 struct rx_connection
*conn
= call
->conn
;
/* Connection/call identity stamped into the header. */
1399 p
->header
.cid
= (conn
->cid
| call
->channel
);
1400 p
->header
.serviceId
= conn
->serviceId
;
1401 p
->header
.securityIndex
= conn
->securityIndex
;
1402 p
->header
.callNumber
= *call
->callNumber
;
/* Post-increment: this packet takes the current tnext as its seq. */
1403 p
->header
.seq
= call
->tnext
++;
1404 p
->header
.epoch
= conn
->epoch
;
1405 p
->header
.type
= RX_PACKET_TYPE_DATA
;
1406 p
->header
.flags
= 0;
1407 p
->header
.spare
= 0;
1408 if (conn
->type
== RX_CLIENT_CONNECTION
)
1409 p
->header
.flags
|= RX_CLIENT_INITIATED
;
1412 p
->header
.flags
|= RX_LAST_PACKET
;
/* A zero serial means "never sent"; see rxi_SendPacket, which assigns
 * real serials at transmission time. */
1414 clock_Zero(&p
->retryTime
); /* Never yet transmitted */
1415 p
->header
.serial
= 0; /* Another way of saying never
1420 * Now that we're sure this is the last data on the call, make sure that
1421 * the "length" and the sum of the iov_lens matches.
1423 len
= p
->length
+ call
->conn
->securityHeaderSize
;
/* Walk the data iovecs subtracting their lengths from len; any
 * leftover (len != 0 afterwards) is absorbed into the last iovec
 * visited, or is a fatal inconsistency. */
1425 for (i
= 1; i
< p
->niovecs
&& len
> 0; i
++) {
1426 len
-= p
->wirevec
[i
].iov_len
;
1429 osi_Panic("PrepareSendPacket 1\n"); /* MTUXXX */
1432 p
->wirevec
[i
- 1].iov_len
+= len
;
1434 RXS_PreparePacket(conn
->securityObject
, call
, p
);