3 * ===================================
4 * HARP | Host ATM Research Platform
5 * ===================================
8 * This Host ATM Research Platform ("HARP") file (the "Software") is
9 * made available by Network Computing Services, Inc. ("NetworkCS")
10 * "AS IS". NetworkCS does not provide maintenance, improvements or
11 * support of any kind.
13 * NETWORKCS MAKES NO WARRANTIES OR REPRESENTATIONS, EXPRESS OR IMPLIED,
14 * INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY
15 * AND FITNESS FOR A PARTICULAR PURPOSE, AS TO ANY ELEMENT OF THE
16 * SOFTWARE OR ANY SUPPORT PROVIDED IN CONNECTION WITH THIS SOFTWARE.
17 * In no event shall NetworkCS be responsible for any damages, including
18 * but not limited to consequential damages, arising from or relating to
19 * any use of the Software or related support.
21 * Copyright 1994-1998 Network Computing Services, Inc.
23 * Copies of this Software may be made, however, the above copyright
24 * notice must be reproduced on all copies.
26 * @(#) $FreeBSD: src/sys/netatm/atm_subr.c,v 1.7 2000/02/13 03:31:59 peter Exp $
27 * @(#) $DragonFly: src/sys/netproto/atm/atm_subr.c,v 1.21 2007/05/23 08:57:07 dillon Exp $
34 * Miscellaneous ATM subroutines
38 #include "kern_include.h"
40 #include <sys/thread2.h>
41 #include <sys/msgport2.h>
/*
 * Global state for the ATM subsystem.
 *
 * NOTE(review): this chunk is extraction-garbled — each original source
 * line is split across several physical lines and some original lines are
 * missing; the stray leading integers are original line numbers from the
 * extraction. Code tokens are left byte-identical; only comments added.
 */
/* Head of the chain of registered physical ATM interfaces. */
46 struct atm_pif
*atm_interface_head
= NULL
;
/* Head of the chain of registered network convergence modules. */
47 struct atm_ncm
*atm_netconv_head
= NULL
;
/* Endpoint service table, indexed 0..ENDPT_MAX. */
48 Atm_endpoint
*atm_endpoints
[ENDPT_MAX
+1] = {NULL
};
/* Chain of all active storage pools (see atm_allocate/atm_compact). */
49 struct sp_info
*atm_pool_head
= NULL
;
/* Deferred stack-call queue head/tail, drained by atm_stack_drain(). */
50 struct stackq_entry
*atm_stackq_head
= NULL
, *atm_stackq_tail
;
/* ATM socket statistics, zero-initialized. */
51 struct atm_sock_stat atm_sock_stat
= { { 0 } };
/* Debug/diagnostic knobs and version global. */
54 int atm_dev_print
= 0;
55 int atm_print_data
= 0;
56 int atm_version
= ATM_VERSION
;
57 struct timeval atm_debugtime
= {0, 0};
/* ATM software-interrupt input queue (length set in initialization). */
58 struct ifqueue atm_intrq
;
/* Storage pool for Atm_attributes blocks.
 * NOTE(review): the remaining initializer fields are missing from this view. */
60 struct sp_info atm_attributes_pool
= {
61 "atm attributes pool", /* si_name */
62 sizeof(Atm_attributes
), /* si_blksiz */
/* Callout driving the ATM_HZ timer tick (atm_timexp re-arms it). */
67 static struct callout atm_timexp_ch
;
/* Local function prototypes. */
72 static void atm_compact (struct atm_time
*);
73 static KTimeout_ret
atm_timexp (void *);
74 static void atm_intr(struct netmsg
*);
/* Timer queue: singly-linked list with delta-encoded tick counts
 * (see the atm_timeout header comment below). */
79 static struct atm_time
*atm_timeq
= NULL
;
/* Timer block used to schedule periodic storage-pool compaction. */
80 static struct atm_time atm_compactimer
= {0, 0};
/* Storage pool for deferred stack-call queue entries.
 * NOTE(review): the remaining initializer fields are missing from this view. */
82 static struct sp_info atm_stackq_pool
= {
83 "Service stack queue pool", /* si_name */
84 sizeof(struct stackq_entry
), /* si_blksiz */
/*
 * NOTE(review): the signature of this initialization routine (presumably
 * atm_initialize() — TODO confirm against the original file) is missing
 * from this garbled view; only body fragments survive, left byte-identical.
 */
91 * Initialize ATM kernel
93 * Performs any initialization required before things really get underway.
94 * Called from ATM domain initialization or from first registration function
108 * Never called from interrupts, so no locking needed
/* Size the ATM software-interrupt queue and register its netisr handler. */
114 atm_intrq
.ifq_maxlen
= ATM_INTRQ_MAX
;
115 netisr_register(NETISR_ATM
, cpu0_portfn
, atm_intr
);
118 * Initialize subsystems
/* Arm the ATM_HZ timer tick; atm_timexp() re-arms itself each tick. */
125 callout_init(&atm_timexp_ch
);
126 callout_reset(&atm_timexp_ch
, hz
/ ATM_HZ
, atm_timexp
, NULL
);
129 * Start the compaction timer
/* Schedule the first storage-pool compaction pass (see atm_compact). */
131 atm_timeout(&atm_compactimer
, SPOOL_COMPACT
, atm_compact
);
136 * Allocate a Control Block
138 * Gets a new control block allocated from the specified storage pool,
139 * acquiring memory for new pool chunks if required. The returned control
140 * block's contents will be cleared.
143 * sip pointer to sp_info for storage pool
146 * addr pointer to allocated control block
147 * 0 allocation failed
/*
 * NOTE(review): many original lines of this function are missing from this
 * garbled view (locals such as 'bp', 'slp', 'i', 'n', braces, critical
 * sections, and several control-flow lines). Code left byte-identical.
 */
151 atm_allocate(struct sp_info
*sip
)
154 struct sp_chunk
*scp
;
165 * Are there any free in the pool?
170 * Find first chunk with a free block
172 for (scp
= sip
->si_poolh
; scp
; scp
= scp
->sc_next
) {
173 if (scp
->sc_freeh
!= NULL
)
180 * No free blocks - have to allocate a new
181 * chunk (but put a limit to this)
183 struct sp_link
*slp_next
;
187 * First time for this pool??
189 if (sip
->si_chunksiz
== 0) {
193 * Initialize pool information
/* Chunk size = chunk header + per-block (block + link) space, rounded up. */
195 n
= sizeof(struct sp_chunk
) +
197 (sip
->si_blksiz
+ sizeof(struct sp_link
));
198 sip
->si_chunksiz
= roundup(n
, SPOOL_ROUNDUP
);
201 * Place pool on kernel chain
203 LINK2TAIL(sip
, struct sp_info
, atm_pool_head
, si_next
);
/* Enforce the pool's chunk-count ceiling before growing it. */
206 if (sip
->si_chunks
>= sip
->si_maxallow
) {
/* NOTE(review): M_NULLOK presumably permits a NULL return here rather
 * than blocking indefinitely — confirm flag semantics in kmalloc(9). */
212 scp
= KM_ALLOC(sip
->si_chunksiz
, M_DEVBUF
,
213 M_INTWAIT
| M_NULLOK
);
221 scp
->sc_magic
= SPOOL_MAGIC
;
225 * Divvy up chunk into free blocks
227 slp
= (struct sp_link
*)(scp
+ 1);
/* Thread all but the last block onto the chunk's free list. */
230 for (i
= sip
->si_blkcnt
; i
> 1; i
--) {
231 slp_next
= (struct sp_link
*)((caddr_t
)(slp
+ 1) +
233 slp
->sl_u
.slu_next
= slp_next
;
/* Terminate the free list at the last block. */
236 slp
->sl_u
.slu_next
= NULL
;
240 * Add new chunk to end of pool
243 sip
->si_poolt
->sc_next
= scp
;
/* Update pool accounting and the chunk high-water mark. */
249 sip
->si_total
+= sip
->si_blkcnt
;
250 sip
->si_free
+= sip
->si_blkcnt
;
251 if (sip
->si_chunks
> sip
->si_maxused
)
252 sip
->si_maxused
= sip
->si_chunks
;
256 * Allocate the first free block in chunk
259 scp
->sc_freeh
= slp
->sl_u
.slu_next
;
265 * Save link back to pool chunk
267 slp
->sl_u
.slu_chunk
= scp
;
/* Return the block cleared, per the contract in the header comment. */
272 KM_ZERO(bp
, sip
->si_blksiz
);
280 * Free a Control Block
282 * Returns a previously allocated control block back to the owners
286 * bp pointer to block to be freed
/*
 * NOTE(review): the function signature (presumably atm_free(bp) — TODO
 * confirm) and several body lines (including the branch selecting between
 * the two free-list cases and the free-count updates) are missing from
 * this garbled view. Code left byte-identical.
 */
296 struct sp_chunk
*scp
;
302 * Get containing chunk and pool info
/* Recover the sp_link header and its back-pointer to the owning chunk. */
304 slp
= (struct sp_link
*)bp
;
306 scp
= slp
->sl_u
.slu_chunk
;
/* Sanity check: the chunk header must carry the pool magic number. */
307 if (scp
->sc_magic
!= SPOOL_MAGIC
)
308 panic("atm_free: chunk magic missing");
312 * Add block to free chain
/* Append to the chunk's free list (missing lines presumably distinguish
 * the empty vs. non-empty list cases — verify against the original). */
315 scp
->sc_freet
->sl_u
.slu_next
= slp
;
318 scp
->sc_freeh
= scp
->sc_freet
= slp
;
/* Freed block becomes the tail of the free list. */
319 slp
->sl_u
.slu_next
= NULL
;
329 * Storage Pool Compaction
331 * Called periodically in order to perform compaction of the
332 * storage pools. Each pool will be checked to see if any chunks
333 * can be freed, taking some care to avoid freeing too many chunks
334 * in order to avoid memory thrashing.
336 * Called from a critical section.
339 * tip pointer to timer control block (atm_compactimer)
/*
 * NOTE(review): the 'sip' declaration, braces, continue/else lines and the
 * chunk-count decrement are missing from this garbled view. Code left
 * byte-identical.
 */
346 atm_compact(struct atm_time
*tip
)
349 struct sp_chunk
*scp
;
351 struct sp_chunk
*scp_prev
;
354 * Check out all storage pools
356 for (sip
= atm_pool_head
; sip
; sip
= sip
->si_next
) {
359 * Always keep a minimum number of chunks around
361 if (sip
->si_chunks
<= SPOOL_MIN_CHUNK
)
365 * Maximum chunks to free at one time will leave
366 * pool with at least 50% utilization, but never
367 * go below minimum chunk count.
369 i
= ((sip
->si_free
* 2) - sip
->si_total
) / sip
->si_blkcnt
;
370 i
= MIN(i
, sip
->si_chunks
- SPOOL_MIN_CHUNK
);
373 * Look for chunks to free
/* Walk the pool's chunk list, freeing up to 'i' fully-unused chunks. */
376 for (scp
= sip
->si_poolh
; scp
&& i
> 0; ) {
378 if (scp
->sc_used
== 0) {
381 * Found a chunk to free, so do it
/* Unlink the chunk: interior case first, then tail and head fixups. */
384 scp_prev
->sc_next
= scp
->sc_next
;
385 if (sip
->si_poolt
== scp
)
386 sip
->si_poolt
= scp_prev
;
388 sip
->si_poolh
= scp
->sc_next
;
390 KM_FREE((caddr_t
)scp
, sip
->si_chunksiz
,
394 * Update pool controls
397 sip
->si_total
-= sip
->si_blkcnt
;
398 sip
->si_free
-= sip
->si_blkcnt
;
/* Advance from the predecessor so the walk survives the unlink. */
401 scp
= scp_prev
->sc_next
;
412 * Restart the compaction timer
414 atm_timeout(&atm_compactimer
, SPOOL_COMPACT
, atm_compact
);
421 * Release a Storage Pool
423 * Frees all dynamic storage acquired for a storage pool.
424 * This function is normally called just prior to a module's unloading.
427 * sip pointer to sp_info for storage pool
/*
 * NOTE(review): braces, the in-use check preceding the panic, and the
 * remaining pool-field resets are missing from this garbled view. Code
 * left byte-identical.
 */
434 atm_release_pool(struct sp_info
*sip
)
436 struct sp_chunk
*scp
, *scp_next
;
440 * Free each chunk in pool
442 for (scp
= sip
->si_poolh
; scp
; scp
= scp_next
) {
445 * Check for memory leaks
/* Releasing a pool with outstanding blocks is a caller bug. */
448 panic("atm_release_pool: unfreed blocks");
/* Grab the successor before freeing the current chunk. */
450 scp_next
= scp
->sc_next
;
452 KM_FREE((caddr_t
)scp
, sip
->si_chunksiz
, M_DEVBUF
);
456 * Update pool controls
458 sip
->si_poolh
= NULL
;
464 * Unlink pool from active chain
/* Zero si_chunksiz so a later atm_allocate() re-initializes the pool. */
466 sip
->si_chunksiz
= 0;
467 UNLINK(sip
, struct sp_info
, atm_pool_head
, si_next
);
474 * Handle timer tick expiration
476 * Decrement tick count in first block on timer queue. If there
477 * are blocks with expired timers, call their timeout function.
478 * This function is called ATM_HZ times per second.
481 * arg argument passed on timeout() call
/*
 * NOTE(review): braces, the early-exit body of the first 'if', the actual
 * handler invocation (func = tip->ti_func; (*func)(tip) — TODO confirm),
 * and the deferred-call drain are missing from this garbled view.
 */
488 atm_timexp(void *arg
)
490 struct atm_time
*tip
;
494 * Decrement tick count
/* Nothing queued, or the head's delta has not yet reached zero. */
496 if (((tip
= atm_timeq
) == NULL
) || (--tip
->ti_ticks
> 0)) {
501 * Stack queue should have been drained
504 if (atm_stackq_head
!= NULL
)
505 panic("atm_timexp: stack queue not empty");
509 * Dispatch expired timers
/* Delta encoding: every queued block whose delta is 0 expires this tick. */
511 while (((tip
= atm_timeq
) != NULL
) && (tip
->ti_ticks
== 0)) {
512 void (*func
)(struct atm_time
*);
515 * Remove expired block from queue
517 atm_timeq
= tip
->ti_next
;
518 tip
->ti_flag
&= ~TIF_QUEUED
;
521 * Call timeout handler (with network interrupts locked out)
527 * Drain any deferred calls
/* Re-arm ourselves for the next ATM_HZ tick. */
537 callout_reset(&atm_timexp_ch
, hz
/ ATM_HZ
, atm_timexp
, NULL
);
542 * Schedule a control block timeout
544 * Place the supplied timer control block on the timer queue. The
545 * function (func) will be called in 't' timer ticks with the
546 * control block address as its only argument. There are ATM_HZ
547 * timer ticks per second. The ticks value stored in each block is
548 * a delta of the number of ticks from the previous block in the queue.
549 * Thus, for each tick interval, only the first block in the queue
550 * needs to have its tick value decremented.
553 * tip pointer to timer control block
554 * t number of timer ticks until expiration
555 * func pointer to function to call at expiration
/*
 * NOTE(review): the minimum-delay clamp, the delta subtraction inside the
 * search loop, and the actual queue-insertion statements (original lines
 * ~585-602) are missing from this garbled view. Code left byte-identical.
 */
562 atm_timeout(struct atm_time
*tip
, int t
, void (*func
)(struct atm_time
*))
564 struct atm_time
*tip1
, *tip2
;
568 * Check for double queueing error
570 if (tip
->ti_flag
& TIF_QUEUED
)
571 panic("atm_timeout: double queueing");
574 * Make sure we delay at least a little bit
580 * Find out where we belong on the queue
/* Walk the delta-encoded queue; tip1 trails tip2 as insertion point. */
583 for (tip1
= NULL
, tip2
= atm_timeq
; tip2
&& (tip2
->ti_ticks
<= t
);
584 tip1
= tip2
, tip2
= tip1
->ti_next
) {
589 * Place ourselves on queue and update timer deltas
/* Mark the block as queued so a second atm_timeout() panics. */
603 tip
->ti_flag
|= TIF_QUEUED
;
615 * Remove the supplied timer control block from the timer queue.
618 * tip pointer to timer control block
621 * 0 control block successfully dequeued
622 * 1 control block not on timer queue
/*
 * NOTE(review): the return statements, the not-found handling, and the
 * head-of-queue unlink case are missing from this garbled view. Code left
 * byte-identical.
 */
626 atm_untimeout(struct atm_time
*tip
)
628 struct atm_time
*tip1
, *tip2
;
631 * Is control block queued?
/* Fast path: block was never queued (or already expired). */
633 if ((tip
->ti_flag
& TIF_QUEUED
) == 0)
637 * Find control block on the queue
/* tip1 trails tip2 so the predecessor is known at unlink time. */
640 for (tip1
= NULL
, tip2
= atm_timeq
; tip2
&& (tip2
!= tip
);
641 tip1
= tip2
, tip2
= tip1
->ti_next
) {
650 * Remove block from queue and update timer deltas
656 tip1
->ti_next
= tip2
;
/* Fold the removed block's delta into its successor to keep totals. */
659 tip2
->ti_ticks
+= tip
->ti_ticks
;
664 tip
->ti_flag
&= ~TIF_QUEUED
;
674 * Queues a stack call which must be deferred to the global stack queue.
675 * The call parameters are stored in entries which are allocated from the
676 * stack queue storage pool.
680 * func destination function
681 * token destination layer's token
682 * cvp pointer to connection vcc
683 * arg1 command argument
684 * arg2 command argument
688 * errno call not queued - reason indicated
/*
 * NOTE(review): the allocation-failure check, the sq_cmd/sq_func/sq_arg
 * assignments, and the else branch of the tail append are missing from
 * this garbled view. Code left byte-identical.
 */
692 atm_stack_enq(int cmd
, void (*func
)(int, void *, int, int), void *token
,
693 Atm_connvc
*cvp
, int arg1
, int arg2
)
695 struct stackq_entry
*sqp
;
700 * Get a new queue entry for this call
/* Entries come from the dedicated stack-queue storage pool (zeroed). */
702 sqp
= (struct stackq_entry
*)atm_allocate(&atm_stackq_pool
);
714 sqp
->sq_token
= token
;
717 sqp
->sq_connvc
= cvp
;
720 * Put new entry at end of queue
/* Empty-queue case sets the head; otherwise append after the tail. */
722 if (atm_stackq_head
== NULL
)
723 atm_stackq_head
= sqp
;
725 atm_stackq_tail
->sq_next
= sqp
;
726 atm_stackq_tail
= sqp
;
734 * Drain the Stack Queue
736 * Dequeues and processes entries from the global stack queue.
/*
 * NOTE(review): the loop-iteration-limit/panic lines (referenced by the
 * comment below), the skip/continue paths, the call-argument tails, and
 * the qprev bookkeeping are missing from this garbled view. Code left
 * byte-identical.
 */
746 atm_stack_drain(void)
748 struct stackq_entry
*sqp
, *qprev
, *qnext
;
753 * Loop thru entire queue until queue is empty
754 * (but panic rather than loop forever)
759 for (sqp
= atm_stackq_head
; sqp
; ) {
762 * Got an eligible entry, do STACK_CALL stuff
/* Upward call: defer while a downward call is active on this vcc. */
764 if (sqp
->sq_cmd
& STKCMD_UP
) {
765 if (sqp
->sq_connvc
->cvc_downcnt
) {
768 * Can't process now, skip it
776 * OK, dispatch the call
/* Bracket the dispatch with the vcc's up-call depth counter. */
778 sqp
->sq_connvc
->cvc_upcnt
++;
779 (*sqp
->sq_func
)(sqp
->sq_cmd
,
783 sqp
->sq_connvc
->cvc_upcnt
--;
/* Downward call: symmetric — defer while an upward call is active. */
785 if (sqp
->sq_connvc
->cvc_upcnt
) {
788 * Can't process now, skip it
796 * OK, dispatch the call
798 sqp
->sq_connvc
->cvc_downcnt
++;
799 (*sqp
->sq_func
)(sqp
->sq_cmd
,
803 sqp
->sq_connvc
->cvc_downcnt
--;
807 * Dequeue processed entry and free it
/* Unlink: interior case via qprev, then head/tail fixups. */
810 qnext
= sqp
->sq_next
;
812 qprev
->sq_next
= qnext
;
814 atm_stackq_head
= qnext
;
816 atm_stackq_tail
= qprev
;
/* Entry storage goes back to the stack-queue pool. */
817 atm_free((caddr_t
)sqp
);
823 * Make sure entire queue was drained
825 if (atm_stackq_head
!= NULL
)
826 panic("atm_stack_drain: Queue not emptied");
832 * Process Interrupt Queue
834 * Processes entries on the ATM interrupt queue. This queue is used by
835 * device interface drivers in order to schedule events from the driver's
836 * lower (interrupt) half to the driver's stack services.
838 * The interrupt routines must store the stack processing function to call
839 * and a token (typically a driver/stack control block) at the front of the
840 * queued buffer. We assume that the function pointer and token values are
841 * both contained (and properly aligned) in the first buffer of the chain.
/*
 * NOTE(review): the 'cp', 'token', and 'm1' declarations, the pointer
 * advance between the func and token loads, the actual (*func)(token, m)
 * dispatch, and the stack-drain call are missing from this garbled view.
 * Code left byte-identical.
 */
851 atm_intr(struct netmsg
*msg
)
853 struct mbuf
*m
= ((struct netmsg_packet
*)msg
)->nm_packet
;
855 atm_intr_func_t func
;
859 * Get function to call and token value
/* func and token were stored back-to-back at the head of the first buffer. */
861 KB_DATASTART(m
, cp
, caddr_t
);
862 func
= *(atm_intr_func_t
*)cp
;
864 token
= *(void **)cp
;
/* Strip the func+token header so the stack sees only payload data. */
865 KB_HEADADJ(m
, -(sizeof(func
) + sizeof(token
)));
/* Drop a now-empty leading buffer from the chain. */
866 if (KB_LEN(m
) == 0) {
868 KB_UNLINKHEAD(m
, m1
);
873 * Call processing function
878 * Drain any deferred calls
881 /* msg was embedded in the mbuf, do not reply! */
885 * Print a pdu buffer chain
888 * m pointer to pdu buffer chain
889 * msg pointer to message header string
/*
 * NOTE(review): the 'c', 'cp', and 'i' declarations and the tail of this
 * function fall outside / are missing from this view; the function is
 * incomplete here. Code left byte-identical.
 */
896 atm_pdu_print(KBuffer
*m
, char *msg
)
904 KB_DATASTART(m
, cp
, caddr_t
);
/* Print buffer address, data pointer and length for this chain link. */
905 kprintf("%cbfr=%p data=%p len=%d: ",
906 c
, m
, cp
, KB_LEN(m
));
/* Optionally hex-dump the buffer contents (atm_print_data debug knob). */
908 if (atm_print_data
) {
909 for (i
= 0; i
< KB_LEN(m
); i
++) {
910 kprintf("%2x ", (u_char
)*cp
++);
912 kprintf("<end_bfr>\n");