3 * ===================================
4 * HARP | Host ATM Research Platform
5 * ===================================
8 * This Host ATM Research Platform ("HARP") file (the "Software") is
9 * made available by Network Computing Services, Inc. ("NetworkCS")
10 * "AS IS". NetworkCS does not provide maintenance, improvements or
11 * support of any kind.
13 * NETWORKCS MAKES NO WARRANTIES OR REPRESENTATIONS, EXPRESS OR IMPLIED,
14 * INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY
15 * AND FITNESS FOR A PARTICULAR PURPOSE, AS TO ANY ELEMENT OF THE
16 * SOFTWARE OR ANY SUPPORT PROVIDED IN CONNECTION WITH THIS SOFTWARE.
17 * In no event shall NetworkCS be responsible for any damages, including
18 * but not limited to consequential damages, arising from or relating to
19 * any use of the Software or related support.
21 * Copyright 1994-1998 Network Computing Services, Inc.
23 * Copies of this Software may be made, however, the above copyright
24 * notice must be reproduced on all copies.
26 * @(#) $FreeBSD: src/sys/netatm/atm_subr.c,v 1.7 2000/02/13 03:31:59 peter Exp $
27 * @(#) $DragonFly: src/sys/netproto/atm/atm_subr.c,v 1.23 2008/09/24 14:26:39 sephe Exp $
34 * Miscellaneous ATM subroutines
38 #include "kern_include.h"
40 #include <sys/thread2.h>
41 #include <sys/msgport2.h>
/*
 * Global variables
 */
struct atm_pif		*atm_interface_head = NULL;	/* registered physical interfaces */
struct atm_ncm		*atm_netconv_head = NULL;	/* registered network convergence modules */
Atm_endpoint		*atm_endpoints[ENDPT_MAX+1] = {NULL};	/* endpoint services, indexed by endpoint id */
struct sp_info		*atm_pool_head = NULL;		/* chain of active storage pools (see atm_allocate/atm_compact) */
struct stackq_entry	*atm_stackq_head = NULL, *atm_stackq_tail;	/* deferred stack-call FIFO (head/tail) */
struct atm_sock_stat	atm_sock_stat = { { 0 } };
int			atm_dev_print = 0;		/* debug switch — presumably enables device printing; confirm */
int			atm_print_data = 0;		/* when set, pdu print routines dump payload bytes */
int			atm_version = ATM_VERSION;
struct timeval		atm_debugtime = {0, 0};
struct ifqueue		atm_intrq;			/* ATM netisr packet queue (sized in initialization code) */

struct sp_info	atm_attributes_pool = {
	"atm attributes pool",		/* si_name */
	sizeof(Atm_attributes),		/* si_blksiz */
	/*
	 * NOTE(review): the remaining initializers (si_blkcnt, si_maxallow,
	 * closing brace, ...) appear truncated in this chunk — recover from
	 * upstream atm_subr.c before building.
	 */

static struct callout	atm_timexp_ch;	/* callout handle driving atm_timexp() at ATM_HZ */

/*
 * Local functions
 */
static void		atm_compact (struct atm_time *);
static KTimeout_ret	atm_timexp (void *);
static void		atm_intr(struct netmsg *);

/*
 * Local variables
 */
static struct atm_time	*atm_timeq = NULL;	/* timer queue head; ticks are deltas from previous entry */
static struct atm_time	atm_compactimer = {0, 0};	/* periodic storage-pool compaction timer */

static struct sp_info	atm_stackq_pool = {
	"Service stack queue pool",	/* si_name */
	sizeof(struct stackq_entry),	/* si_blksiz */
	/*
	 * NOTE(review): remaining initializers and closing brace appear
	 * truncated in this chunk — confirm against upstream.
	 */
/*
 * Initialize ATM kernel
 *
 * Performs any initialization required before things really get underway.
 * Called from ATM domain initialization or from first registration function.
 *
 * Never called from interrupts, so no locking needed.
 *
 * NOTE(review): the function signature (presumably atm_initialize) and
 * several statements/braces appear to be missing from this chunk —
 * confirm against upstream atm_subr.c.
 */
	/* Size the ATM software-interrupt queue and register its netisr handler */
	atm_intrq.ifq_maxlen = ATM_INTRQ_MAX;
	netisr_register(NETISR_ATM, cpu0_portfn, pktinfo_portfn_cpu0,
			atm_intr, NETISR_FLAG_NOTMPSAFE);

	/*
	 * Initialize subsystems
	 */

	/* Arm the timer-tick callout: atm_timexp() fires ATM_HZ times per second */
	callout_init(&atm_timexp_ch);
	callout_reset(&atm_timexp_ch, hz / ATM_HZ, atm_timexp, NULL);

	/*
	 * Start the compaction timer
	 */
	atm_timeout(&atm_compactimer, SPOOL_COMPACT, atm_compact);
/*
 * Allocate a Control Block
 *
 * Gets a new control block allocated from the specified storage pool,
 * acquiring memory for new pool chunks if required.  The returned control
 * block's contents will be cleared (KM_ZERO at the end).
 *
 * Arguments:
 *	sip	pointer to sp_info for storage pool
 *
 * Returns:
 *	addr	pointer to allocated control block
 *	0	allocation failed
 *
 * NOTE(review): the return-type line, opening brace, several local
 * declarations (n, i, slp, bp) and closing braces appear truncated in
 * this chunk — recover from upstream atm_subr.c.
 */
atm_allocate(struct sp_info *sip)
	struct sp_chunk	*scp;

	/*
	 * Are there any free in the pool?
	 */
		/*
		 * Find first chunk with a free block
		 */
		for (scp = sip->si_poolh; scp; scp = scp->sc_next) {
			if (scp->sc_freeh != NULL)

		/*
		 * No free blocks - have to allocate a new
		 * chunk (but put a limit to this)
		 */
		struct sp_link	*slp_next;

		/*
		 * First time for this pool??
		 */
		if (sip->si_chunksiz == 0) {
			/*
			 * Initialize pool information: chunk size covers the
			 * chunk header plus per-block payload + link overhead
			 */
			n = sizeof(struct sp_chunk) +
				(sip->si_blksiz + sizeof(struct sp_link));
			sip->si_chunksiz = roundup(n, SPOOL_ROUNDUP);

			/*
			 * Place pool on kernel chain
			 */
			LINK2TAIL(sip, struct sp_info, atm_pool_head, si_next);

		/* Enforce the pool's chunk limit */
		if (sip->si_chunks >= sip->si_maxallow) {

		scp = KM_ALLOC(sip->si_chunksiz, M_DEVBUF,
				M_INTWAIT | M_NULLOK);
		scp->sc_magic = SPOOL_MAGIC;	/* validated later by atm_free() */

		/*
		 * Divvy up chunk into free blocks: the sp_link array starts
		 * immediately after the chunk header
		 */
		slp = (struct sp_link *)(scp + 1);
		for (i = sip->si_blkcnt; i > 1; i--) {
			slp_next = (struct sp_link *)((caddr_t)(slp + 1) +
			slp->sl_u.slu_next = slp_next;
		/* Last block terminates the free chain */
		slp->sl_u.slu_next = NULL;

		/*
		 * Add new chunk to end of pool
		 */
		sip->si_poolt->sc_next = scp;

		/* Account for the new chunk's blocks and track high-water mark */
		sip->si_total += sip->si_blkcnt;
		sip->si_free += sip->si_blkcnt;
		if (sip->si_chunks > sip->si_maxused)
			sip->si_maxused = sip->si_chunks;

	/*
	 * Allocate the first free block in chunk
	 */
	scp->sc_freeh = slp->sl_u.slu_next;

	/*
	 * Save link back to pool chunk (read back by atm_free())
	 */
	slp->sl_u.slu_chunk = scp;

	/* Clear the returned block before handing it back */
	KM_ZERO(bp, sip->si_blksiz);
/*
 * Free a Control Block
 *
 * Returns a previously allocated control block back to the owner's
 * storage pool.
 *
 * Arguments:
 *	bp	pointer to block to be freed
 *
 * NOTE(review): the function signature (presumably atm_free) and several
 * statements/braces appear truncated in this chunk — confirm upstream.
 */
	struct sp_chunk	*scp;

	/*
	 * Get containing chunk and pool info.  Each allocated block is
	 * prefixed by its sp_link, whose slu_chunk was set by atm_allocate().
	 */
	slp = (struct sp_link *)bp;
	scp = slp->sl_u.slu_chunk;
	if (scp->sc_magic != SPOOL_MAGIC)
		panic("atm_free: chunk magic missing");

	/*
	 * Add block to free chain.
	 * NOTE(review): these two statements appear to be the two arms of a
	 * missing if/else on whether the chunk's free chain is empty —
	 * append to tail vs. become both head and tail.
	 */
	scp->sc_freet->sl_u.slu_next = slp;
	scp->sc_freeh = scp->sc_freet = slp;
	slp->sl_u.slu_next = NULL;
/*
 * Storage Pool Compaction
 *
 * Called periodically in order to perform compaction of the
 * storage pools.  Each pool will be checked to see if any chunks
 * can be freed, taking some care to avoid freeing too many chunks
 * in order to avoid memory thrashing.
 *
 * Called from a critical section.
 *
 * Arguments:
 *	tip	pointer to timer control block (atm_compactimer)
 *
 * NOTE(review): the return-type line, local declarations (sip, i) and
 * several braces appear truncated in this chunk.
 */
atm_compact(struct atm_time *tip)
	struct sp_chunk	*scp;
	struct sp_chunk	*scp_prev;

	/*
	 * Check out all storage pools
	 */
	for (sip = atm_pool_head; sip; sip = sip->si_next) {

		/*
		 * Always keep a minimum number of chunks around
		 */
		if (sip->si_chunks <= SPOOL_MIN_CHUNK)

		/*
		 * Maximum chunks to free at one time will leave
		 * pool with at least 50% utilization, but never
		 * go below minimum chunk count.
		 */
		i = ((sip->si_free * 2) - sip->si_total) / sip->si_blkcnt;
		i = MIN(i, sip->si_chunks - SPOOL_MIN_CHUNK);

		/*
		 * Look for chunks to free
		 */
		for (scp = sip->si_poolh; scp && i > 0; ) {
			if (scp->sc_used == 0) {
				/*
				 * Found a chunk to free, so do it:
				 * unlink it from the pool's chunk list
				 * (head-removal branch appears truncated here)
				 */
				scp_prev->sc_next = scp->sc_next;
				if (sip->si_poolt == scp)
					sip->si_poolt = scp_prev;
				sip->si_poolh = scp->sc_next;
				KM_FREE((caddr_t)scp, sip->si_chunksiz,

				/*
				 * Update pool controls
				 */
				sip->si_total -= sip->si_blkcnt;
				sip->si_free -= sip->si_blkcnt;
				/* Resume scan after the removed chunk */
				scp = scp_prev->sc_next;

	/*
	 * Restart the compaction timer
	 */
	atm_timeout(&atm_compactimer, SPOOL_COMPACT, atm_compact);
/*
 * Release a Storage Pool
 *
 * Frees all dynamic storage acquired for a storage pool.
 * This function is normally called just prior to a module's unloading.
 *
 * Arguments:
 *	sip	pointer to sp_info for storage pool
 *
 * NOTE(review): the return-type line, opening brace and some statements
 * appear truncated in this chunk.
 */
atm_release_pool(struct sp_info *sip)
	struct sp_chunk	*scp, *scp_next;

	/*
	 * Free each chunk in pool
	 */
	for (scp = sip->si_poolh; scp; scp = scp_next) {

		/*
		 * Check for memory leaks
		 * NOTE(review): the guarding condition (presumably on
		 * scp->sc_used) appears truncated in this chunk.
		 */
		panic("atm_release_pool: unfreed blocks");

		scp_next = scp->sc_next;
		KM_FREE((caddr_t)scp, sip->si_chunksiz, M_DEVBUF);

	/*
	 * Update pool controls
	 */
	sip->si_poolh = NULL;

	/*
	 * Unlink pool from active chain; zeroing si_chunksiz forces
	 * full re-initialization on the next atm_allocate() call.
	 */
	sip->si_chunksiz = 0;
	UNLINK(sip, struct sp_info, atm_pool_head, si_next);
/*
 * Handle timer tick expiration
 *
 * Decrement tick count in first block on timer queue.  If there
 * are blocks with expired timers, call their timeout function.
 * This function is called ATM_HZ times per second.
 *
 * Arguments:
 *	arg	argument passed on timeout() call
 *
 * NOTE(review): the return-type line (KTimeout_ret per the forward
 * declaration) and several statements/braces appear truncated here.
 */
atm_timexp(void *arg)
	struct atm_time	*tip;

	/*
	 * Decrement tick count.  Only the queue head carries a live count:
	 * ticks are stored as deltas from the previous queue entry.
	 */
	if (((tip = atm_timeq) == NULL) || (--tip->ti_ticks > 0)) {

	/*
	 * Stack queue should have been drained
	 */
	if (atm_stackq_head != NULL)
		panic("atm_timexp: stack queue not empty");

	/*
	 * Dispatch expired timers (all entries whose delta reached zero)
	 */
	while (((tip = atm_timeq) != NULL) && (tip->ti_ticks == 0)) {
		void (*func)(struct atm_time *);

		/*
		 * Remove expired block from queue
		 */
		atm_timeq = tip->ti_next;
		tip->ti_flag &= ~TIF_QUEUED;

		/*
		 * Call timeout handler (with network interrupts locked out)
		 */

	/*
	 * Drain any deferred calls
	 */

	/* Re-arm the tick callout for the next ATM_HZ interval */
	callout_reset(&atm_timexp_ch, hz / ATM_HZ, atm_timexp, NULL);
/*
 * Schedule a control block timeout
 *
 * Place the supplied timer control block on the timer queue.  The
 * function (func) will be called in 't' timer ticks with the
 * control block address as its only argument.  There are ATM_HZ
 * timer ticks per second.  The ticks value stored in each block is
 * a delta of the number of ticks from the previous block in the queue.
 * Thus, for each tick interval, only the first block in the queue
 * needs to have its tick value decremented.
 *
 * Arguments:
 *	tip	pointer to timer control block
 *	t	number of timer ticks until expiration
 *	func	pointer to function to call at expiration
 *
 * NOTE(review): the return-type line, opening/closing braces and the
 * queue-insertion statements appear truncated in this chunk.
 */
atm_timeout(struct atm_time *tip, int t, void (*func)(struct atm_time *))
	struct atm_time	*tip1, *tip2;

	/*
	 * Check for double queueing error
	 */
	if (tip->ti_flag & TIF_QUEUED)
		panic("atm_timeout: double queueing");

	/*
	 * Make sure we delay at least a little bit
	 */

	/*
	 * Find out where we belong on the queue:
	 * tip1 trails tip2 by one entry during the scan
	 */
	for (tip1 = NULL, tip2 = atm_timeq; tip2 && (tip2->ti_ticks <= t);
			tip1 = tip2, tip2 = tip1->ti_next) {

	/*
	 * Place ourselves on queue and update timer deltas
	 */
	tip->ti_flag |= TIF_QUEUED;
/*
 * Cancel a scheduled control block timeout
 *
 * Remove the supplied timer control block from the timer queue.
 *
 * Arguments:
 *	tip	pointer to timer control block
 *
 * Returns:
 *	0	control block successfully dequeued
 *	1	control block not on timer queue
 *
 * NOTE(review): the function's signature return type, opening brace and
 * several unlink statements appear truncated in this chunk.
 */
atm_untimeout(struct atm_time *tip)
	struct atm_time	*tip1, *tip2;

	/*
	 * Is control block queued?
	 */
	if ((tip->ti_flag & TIF_QUEUED) == 0)

	/*
	 * Find control block on the queue
	 * (tip1 trails tip2 by one entry during the scan)
	 */
	for (tip1 = NULL, tip2 = atm_timeq; tip2 && (tip2 != tip);
			tip1 = tip2, tip2 = tip1->ti_next) {

	/*
	 * Remove block from queue and update timer deltas:
	 * the removed entry's delta is folded into its successor
	 */
	tip1->ti_next = tip2;
	tip2->ti_ticks += tip->ti_ticks;
	tip->ti_flag &= ~TIF_QUEUED;
/*
 * Queue a deferred stack call
 *
 * Queues a stack call which must be deferred to the global stack queue.
 * The call parameters are stored in entries which are allocated from the
 * stack queue storage pool.
 *
 * Arguments:
 *	cmd	stack command code
 *	func	destination function
 *	token	destination layer's token
 *	cvp	pointer to connection vcc
 *	arg1	command argument
 *	arg2	command argument
 *
 * Returns:
 *	errno	call not queued - reason indicated
 *
 * NOTE(review): the return-type line, braces, NULL check on the pool
 * allocation and the sq_cmd/sq_func/sq_arg1/sq_arg2 assignments appear
 * truncated in this chunk.
 */
atm_stack_enq(int cmd, void (*func)(int, void *, int, int), void *token,
		Atm_connvc *cvp, int arg1, int arg2)
	struct stackq_entry	*sqp;

	/*
	 * Get a new queue entry for this call
	 */
	sqp = (struct stackq_entry *)atm_allocate(&atm_stackq_pool);

	sqp->sq_token = token;
	sqp->sq_connvc = cvp;

	/*
	 * Put new entry at end of queue.
	 * NOTE(review): the two branches below appear to be a missing
	 * if/else — set head when empty, else append to tail.
	 */
	if (atm_stackq_head == NULL)
		atm_stackq_head = sqp;
		atm_stackq_tail->sq_next = sqp;
	atm_stackq_tail = sqp;
/*
 * Drain the Stack Queue
 *
 * Dequeues and processes entries from the global stack queue.
 *
 * NOTE(review): the return-type line, braces and several statements
 * (including the else joining the up/down branches) appear truncated
 * in this chunk.
 */
atm_stack_drain(void)
	struct stackq_entry	*sqp, *qprev, *qnext;

	/*
	 * Loop thru entire queue until queue is empty
	 * (but panic rather than loop forever)
	 */
	for (sqp = atm_stackq_head; sqp; ) {

		/*
		 * Got an eligible entry, do STACK_CALL stuff
		 */
		if (sqp->sq_cmd & STKCMD_UP) {
			if (sqp->sq_connvc->cvc_downcnt) {
				/*
				 * Can't process now, skip it
				 * (a down call is in progress on this vcc)
				 */

			/*
			 * OK, dispatch the call (track re-entrancy via
			 * cvc_upcnt around the handler invocation)
			 */
			sqp->sq_connvc->cvc_upcnt++;
			(*sqp->sq_func)(sqp->sq_cmd,
			sqp->sq_connvc->cvc_upcnt--;

			/* down-direction branch (else arm truncated here) */
			if (sqp->sq_connvc->cvc_upcnt) {
				/*
				 * Can't process now, skip it
				 * (an up call is in progress on this vcc)
				 */

			/*
			 * OK, dispatch the call
			 */
			sqp->sq_connvc->cvc_downcnt++;
			(*sqp->sq_func)(sqp->sq_cmd,
			sqp->sq_connvc->cvc_downcnt--;

		/*
		 * Dequeue processed entry and free it back to the pool.
		 * NOTE(review): head/tail fixups below appear to belong to
		 * missing if/else arms (qprev == NULL handling).
		 */
		qnext = sqp->sq_next;
		qprev->sq_next = qnext;
		atm_stackq_head = qnext;
		atm_stackq_tail = qprev;
		atm_free((caddr_t)sqp);

	/*
	 * Make sure entire queue was drained
	 */
	if (atm_stackq_head != NULL)
		panic("atm_stack_drain: Queue not emptied");
/*
 * Process Interrupt Queue
 *
 * Processes entries on the ATM interrupt queue.  This queue is used by
 * device interface drivers in order to schedule events from the driver's
 * lower (interrupt) half to the driver's stack services.
 *
 * The interrupt routines must store the stack processing function to call
 * and a token (typically a driver/stack control block) at the front of the
 * queued buffer.  We assume that the function pointer and token values are
 * both contained (and properly aligned) in the first buffer of the chain.
 *
 * NOTE(review): the static void return line, braces, the cp advance past
 * the function pointer, and the dispatch call appear truncated here.
 */
atm_intr(struct netmsg *msg)
	struct mbuf	*m = ((struct netmsg_packet *)msg)->nm_packet;
	atm_intr_func_t	func;

	/*
	 * Get function to call and token value from the front of the buffer
	 */
	KB_DATASTART(m, cp, caddr_t);
	func = *(atm_intr_func_t *)cp;
	token = *(void **)cp;
	/* Strip the func+token header from the buffer's data area */
	KB_HEADADJ(m, -(sizeof(func) + sizeof(token)));
	if (KB_LEN(m) == 0) {
		/* First buffer is now empty — unlink it from the chain */
		KB_UNLINKHEAD(m, m1);

	/*
	 * Call processing function
	 */

	/*
	 * Drain any deferred calls
	 */

	/* msg was embedded in the mbuf, do not reply! */
886 * Print a pdu buffer chain
889 * m pointer to pdu buffer chain
890 * msg pointer to message header string
897 atm_pdu_print(KBuffer
*m
, char *msg
)
905 KB_DATASTART(m
, cp
, caddr_t
);
906 kprintf("%cbfr=%p data=%p len=%d: ",
907 c
, m
, cp
, KB_LEN(m
));
909 if (atm_print_data
) {
910 for (i
= 0; i
< KB_LEN(m
); i
++) {
911 kprintf("%2x ", (u_char
)*cp
++);
913 kprintf("<end_bfr>\n");