fetch.9: Minor fixes.
[dragonfly.git] / sys / netproto / atm / atm_subr.c
blob0283da27644934d2bd6526b3f2fb7836c884d794
1 /*
3 * ===================================
4 * HARP | Host ATM Research Platform
5 * ===================================
8 * This Host ATM Research Platform ("HARP") file (the "Software") is
9 * made available by Network Computing Services, Inc. ("NetworkCS")
10 * "AS IS". NetworkCS does not provide maintenance, improvements or
11 * support of any kind.
13 * NETWORKCS MAKES NO WARRANTIES OR REPRESENTATIONS, EXPRESS OR IMPLIED,
14 * INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY
15 * AND FITNESS FOR A PARTICULAR PURPOSE, AS TO ANY ELEMENT OF THE
16 * SOFTWARE OR ANY SUPPORT PROVIDED IN CONNECTION WITH THIS SOFTWARE.
17 * In no event shall NetworkCS be responsible for any damages, including
18 * but not limited to consequential damages, arising from or relating to
19 * any use of the Software or related support.
21 * Copyright 1994-1998 Network Computing Services, Inc.
23 * Copies of this Software may be made, however, the above copyright
24 * notice must be reproduced on all copies.
26 * @(#) $FreeBSD: src/sys/netatm/atm_subr.c,v 1.7 2000/02/13 03:31:59 peter Exp $
27 * @(#) $DragonFly: src/sys/netproto/atm/atm_subr.c,v 1.23 2008/09/24 14:26:39 sephe Exp $
31 * Core ATM Services
32 * -----------------
34 * Miscellaneous ATM subroutines
38 #include "kern_include.h"
40 #include <sys/thread2.h>
41 #include <sys/msgport2.h>
/*
 * Global variables
 */
struct atm_pif *atm_interface_head = NULL;	/* registered physical interfaces — NOTE(review): not referenced in this file; presumably maintained by registration code elsewhere */
struct atm_ncm *atm_netconv_head = NULL;	/* network convergence modules — maintained elsewhere */
Atm_endpoint *atm_endpoints[ENDPT_MAX+1] = {NULL};	/* endpoint services indexed by endpoint id — maintained elsewhere */
struct sp_info *atm_pool_head = NULL;	/* chain of all active storage pools; pools are linked here on first atm_allocate() and unlinked by atm_release_pool() */
struct stackq_entry *atm_stackq_head = NULL, *atm_stackq_tail;	/* deferred stack-call FIFO (see atm_stack_enq/atm_stack_drain) */
struct atm_sock_stat atm_sock_stat = { { 0 } };	/* socket-layer statistics */
int atm_init = 0;		/* set once atm_initialize() has completed; guards re-entry */
int atm_debug = 0;		/* debug verbosity control */
int atm_dev_print = 0;		/* device-level print control — not used in this file */
int atm_print_data = 0;		/* if set, atm_pdu_print() also dumps payload bytes */
int atm_version = ATM_VERSION;	/* exported subsystem version */
struct timeval atm_debugtime = {0, 0};	/* timestamp storage for debug output — not used in this file */
struct ifqueue atm_intrq;	/* ATM interrupt queue; ifq_maxlen set in atm_initialize() */
/*
 * Storage pool for Atm_attributes blocks, allocated via atm_allocate().
 * Only the first four sp_info fields are initialized here; the remaining
 * bookkeeping fields start zeroed and are filled in on first allocation.
 */
struct sp_info atm_attributes_pool = {
	"atm attributes pool",		/* si_name */
	sizeof(Atm_attributes),		/* si_blksiz */
	10,				/* si_blkcnt */
	100				/* si_maxallow */
};

static struct callout atm_timexp_ch;	/* drives atm_timexp() every hz/ATM_HZ ticks */

/*
 * Local functions
 */
static void atm_compact (struct atm_time *);
static KTimeout_ret atm_timexp (void *);	/* KTimeout_ret: project typedef — presumably void; confirm in headers */
static void atm_intr(struct netmsg *);

/*
 * Local variables
 */
static struct atm_time *atm_timeq = NULL;	/* delta-encoded timer queue; each ti_ticks is relative to the previous entry (see atm_timeout) */
static struct atm_time atm_compactimer = {0, 0};	/* periodic storage-pool compaction timer */

/*
 * Storage pool for deferred stack-call queue entries (atm_stack_enq).
 */
static struct sp_info atm_stackq_pool = {
	"Service stack queue pool",	/* si_name */
	sizeof(struct stackq_entry),	/* si_blksiz */
	10,				/* si_blkcnt */
	10				/* si_maxallow */
};
/*
 * Initialize ATM kernel
 *
 * Performs any initialization required before things really get underway.
 * Called from ATM domain initialization or from first registration function
 * which gets called.
 *
 * Arguments:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
atm_initialize(void)
{
	/*
	 * Never called from interrupts, so no locking needed.
	 * The atm_init flag makes repeated calls harmless.
	 */
	if (atm_init)
		return;
	atm_init = 1;

	atm_intrq.ifq_maxlen = ATM_INTRQ_MAX;
	/* Register atm_intr() as the NETISR_ATM handler, bound to cpu0's port */
	netisr_register(NETISR_ATM, cpu0_portfn, pktinfo_portfn_cpu0,
	    atm_intr, NETISR_FLAG_NOTMPSAFE);

	/*
	 * Initialize subsystems
	 */
	atm_aal5_init();

	/*
	 * Prime the timer: atm_timexp() reschedules itself, so one
	 * callout_reset() here keeps the ATM_HZ tick running forever.
	 */
	callout_init(&atm_timexp_ch);
	callout_reset(&atm_timexp_ch, hz / ATM_HZ, atm_timexp, NULL);

	/*
	 * Start the compaction timer
	 */
	atm_timeout(&atm_compactimer, SPOOL_COMPACT, atm_compact);
}
/*
 * Allocate a Control Block
 *
 * Gets a new control block allocated from the specified storage pool,
 * acquiring memory for new pool chunks if required.  The returned control
 * block's contents will be cleared.
 *
 * Each block is preceded in memory by a struct sp_link, which while the
 * block is free links it on the chunk's free list, and while allocated
 * points back to the owning chunk (used by atm_free()).
 *
 * Arguments:
 *	sip	pointer to sp_info for storage pool
 *
 * Returns:
 *	addr	pointer to allocated control block
 *	0	allocation failed (chunk limit reached or out of memory)
 *
 */
void *
atm_allocate(struct sp_info *sip)
{
	void *bp;
	struct sp_chunk *scp;
	struct sp_link *slp;

	crit_enter();

	/*
	 * Count calls
	 */
	sip->si_allocs++;

	/*
	 * Are there any free in the pool?
	 */
	if (sip->si_free) {

		/*
		 * Find first chunk with a free block
		 */
		for (scp = sip->si_poolh; scp; scp = scp->sc_next) {
			if (scp->sc_freeh != NULL)
				break;
		}

	} else {

		/*
		 * No free blocks - have to allocate a new
		 * chunk (but put a limit to this)
		 */
		struct sp_link *slp_next;
		int i;

		/*
		 * First time for this pool??
		 */
		if (sip->si_chunksiz == 0) {
			size_t n;

			/*
			 * Initialize pool information: chunk header plus
			 * si_blkcnt blocks, each with its sp_link header.
			 */
			n = sizeof(struct sp_chunk) +
				sip->si_blkcnt *
				(sip->si_blksiz + sizeof(struct sp_link));
			sip->si_chunksiz = roundup(n, SPOOL_ROUNDUP);

			/*
			 * Place pool on kernel chain (so atm_compact()
			 * will see it)
			 */
			LINK2TAIL(sip, struct sp_info, atm_pool_head, si_next);
		}

		if (sip->si_chunks >= sip->si_maxallow) {
			sip->si_fails++;
			crit_exit();
			return (NULL);
		}

		/* M_NULLOK: allocation may fail and return NULL */
		scp = KM_ALLOC(sip->si_chunksiz, M_DEVBUF,
			M_INTWAIT | M_NULLOK);
		if (scp == NULL) {
			sip->si_fails++;
			crit_exit();
			return (NULL);
		}
		scp->sc_next = NULL;
		scp->sc_info = sip;
		scp->sc_magic = SPOOL_MAGIC;
		scp->sc_used = 0;

		/*
		 * Divy up chunk into free blocks
		 */
		slp = (struct sp_link *)(scp + 1);
		scp->sc_freeh = slp;

		for (i = sip->si_blkcnt; i > 1; i--) {
			slp_next = (struct sp_link *)((caddr_t)(slp + 1) +
					sip->si_blksiz);
			slp->sl_u.slu_next = slp_next;
			slp = slp_next;
		}
		slp->sl_u.slu_next = NULL;
		scp->sc_freet = slp;

		/*
		 * Add new chunk to end of pool
		 */
		if (sip->si_poolh)
			sip->si_poolt->sc_next = scp;
		else
			sip->si_poolh = scp;
		sip->si_poolt = scp;

		sip->si_chunks++;
		sip->si_total += sip->si_blkcnt;
		sip->si_free += sip->si_blkcnt;
		if (sip->si_chunks > sip->si_maxused)
			sip->si_maxused = sip->si_chunks;
	}

	/*
	 * Allocate the first free block in chunk
	 */
	slp = scp->sc_freeh;
	scp->sc_freeh = slp->sl_u.slu_next;
	scp->sc_used++;
	sip->si_free--;
	bp = (slp + 1);

	/*
	 * Save link back to pool chunk (needed by atm_free())
	 */
	slp->sl_u.slu_chunk = scp;

	/*
	 * Clear out block
	 */
	KM_ZERO(bp, sip->si_blksiz);

	crit_exit();
	return (bp);
}
281 * Free a Control Block
283 * Returns a previously allocated control block back to the owners
284 * storage pool.
286 * Arguments:
287 * bp pointer to block to be freed
289 * Returns:
290 * none
293 void
294 atm_free(void *bp)
296 struct sp_info *sip;
297 struct sp_chunk *scp;
298 struct sp_link *slp;
300 crit_enter();
303 * Get containing chunk and pool info
305 slp = (struct sp_link *)bp;
306 slp--;
307 scp = slp->sl_u.slu_chunk;
308 if (scp->sc_magic != SPOOL_MAGIC)
309 panic("atm_free: chunk magic missing");
310 sip = scp->sc_info;
313 * Add block to free chain
315 if (scp->sc_freeh) {
316 scp->sc_freet->sl_u.slu_next = slp;
317 scp->sc_freet = slp;
318 } else
319 scp->sc_freeh = scp->sc_freet = slp;
320 slp->sl_u.slu_next = NULL;
321 sip->si_free++;
322 scp->sc_used--;
324 crit_exit();
325 return;
/*
 * Storage Pool Compaction
 *
 * Called periodically in order to perform compaction of the
 * storage pools.  Each pool will be checked to see if any chunks
 * can be freed, taking some care to avoid freeing too many chunks
 * in order to avoid memory thrashing.
 *
 * Called from a critical section.
 *
 * Arguments:
 *	tip	pointer to timer control block (atm_compactimer)
 *
 * Returns:
 *	none
 *
 */
static void
atm_compact(struct atm_time *tip)
{
	struct sp_info *sip;
	struct sp_chunk *scp;
	int i;
	struct sp_chunk *scp_prev;

	/*
	 * Check out all storage pools
	 */
	for (sip = atm_pool_head; sip; sip = sip->si_next) {

		/*
		 * Always keep a minimum number of chunks around
		 */
		if (sip->si_chunks <= SPOOL_MIN_CHUNK)
			continue;

		/*
		 * Maximum chunks to free at one time will leave
		 * pool with at least 50% utilization, but never
		 * go below minimum chunk count.
		 * (i may come out negative; the i > 0 loop guard
		 * below then skips compaction for this pool.)
		 */
		i = ((sip->si_free * 2) - sip->si_total) / sip->si_blkcnt;
		i = MIN(i, sip->si_chunks - SPOOL_MIN_CHUNK);

		/*
		 * Look for chunks to free.  No loop increment: scp is
		 * advanced manually because the current node may be
		 * unlinked and freed inside the loop.
		 */
		scp_prev = NULL;
		for (scp = sip->si_poolh; scp && i > 0; ) {

			if (scp->sc_used == 0) {

				/*
				 * Found a chunk to free, so do it:
				 * unlink from the pool chain, keeping
				 * the tail pointer consistent.
				 */
				if (scp_prev) {
					scp_prev->sc_next = scp->sc_next;
					if (sip->si_poolt == scp)
						sip->si_poolt = scp_prev;
				} else
					sip->si_poolh = scp->sc_next;

				KM_FREE((caddr_t)scp, sip->si_chunksiz,
					M_DEVBUF);

				/*
				 * Update pool controls and resume scan
				 * at the successor of the freed chunk
				 */
				sip->si_chunks--;
				sip->si_total -= sip->si_blkcnt;
				sip->si_free -= sip->si_blkcnt;
				i--;
				if (scp_prev)
					scp = scp_prev->sc_next;
				else
					scp = sip->si_poolh;
			} else {
				scp_prev = scp;
				scp = scp->sc_next;
			}
		}
	}

	/*
	 * Restart the compaction timer
	 */
	atm_timeout(&atm_compactimer, SPOOL_COMPACT, atm_compact);

	return;
}
/*
 * Release a Storage Pool
 *
 * Frees all dynamic storage acquired for a storage pool.
 * This function is normally called just prior to a module's unloading.
 * Panics if any block in the pool is still allocated.
 *
 * Arguments:
 *	sip	pointer to sp_info for storage pool
 *
 * Returns:
 *	none
 *
 */
void
atm_release_pool(struct sp_info *sip)
{
	struct sp_chunk *scp, *scp_next;

	crit_enter();
	/*
	 * Free each chunk in pool
	 */
	for (scp = sip->si_poolh; scp; scp = scp_next) {

		/*
		 * Check for memory leaks
		 */
		if (scp->sc_used)
			panic("atm_release_pool: unfreed blocks");

		/* grab successor before freeing the chunk */
		scp_next = scp->sc_next;

		KM_FREE((caddr_t)scp, sip->si_chunksiz, M_DEVBUF);
	}

	/*
	 * Update pool controls
	 */
	sip->si_poolh = NULL;
	sip->si_chunks = 0;
	sip->si_total = 0;
	sip->si_free = 0;

	/*
	 * Unlink pool from active chain; si_chunksiz = 0 makes the next
	 * atm_allocate() treat the pool as brand new and re-link it.
	 */
	sip->si_chunksiz = 0;
	UNLINK(sip, struct sp_info, atm_pool_head, si_next);
	crit_exit();
	return;
}
/*
 * Handle timer tick expiration
 *
 * Decrement tick count in first block on timer queue.  If there
 * are blocks with expired timers, call their timeout function.
 * This function is called ATM_HZ times per second.
 *
 * Because the queue is delta-encoded (see atm_timeout()), only the
 * head entry's tick count needs decrementing each tick.
 *
 * Arguments:
 *	arg	argument passed on timeout() call
 *
 * Returns:
 *	none
 *
 */
static KTimeout_ret
atm_timexp(void *arg)
{
	struct atm_time *tip;

	crit_enter();
	/*
	 * Decrement tick count; nothing to dispatch if the queue is
	 * empty or the head has ticks remaining.
	 */
	if (((tip = atm_timeq) == NULL) || (--tip->ti_ticks > 0)) {
		goto restart;
	}

	/*
	 * Stack queue should have been drained
	 */
#ifdef DIAGNOSTIC
	if (atm_stackq_head != NULL)
		panic("atm_timexp: stack queue not empty");
#endif

	/*
	 * Dispatch expired timers: the head plus any successors whose
	 * delta is zero (i.e. expiring in the same tick).
	 */
	while (((tip = atm_timeq) != NULL) && (tip->ti_ticks == 0)) {
		void (*func)(struct atm_time *);

		/*
		 * Remove expired block from queue
		 */
		atm_timeq = tip->ti_next;
		tip->ti_flag &= ~TIF_QUEUED;

		/*
		 * Call timeout handler (with network interrupts locked out)
		 */
		func = tip->ti_func;
		(*func)(tip);

		/*
		 * Drain any deferred calls
		 */
		STACK_DRAIN();
	}

restart:
	/*
	 * Restart the timer for the next ATM_HZ tick
	 */
	crit_exit();
	callout_reset(&atm_timexp_ch, hz / ATM_HZ, atm_timexp, NULL);
}
/*
 * Schedule a control block timeout
 *
 * Place the supplied timer control block on the timer queue.  The
 * function (func) will be called in 't' timer ticks with the
 * control block address as its only argument.  There are ATM_HZ
 * timer ticks per second.  The ticks value stored in each block is
 * a delta of the number of ticks from the previous block in the queue.
 * Thus, for each tick interval, only the first block in the queue
 * needs to have its tick value decremented.
 *
 * Arguments:
 *	tip	pointer to timer control block
 *	t	number of timer ticks until expiration
 *	func	pointer to function to call at expiration
 *
 * Returns:
 *	none
 *
 */
void
atm_timeout(struct atm_time *tip, int t, void (*func)(struct atm_time *))
{
	struct atm_time *tip1, *tip2;

	/*
	 * Check for double queueing error
	 */
	if (tip->ti_flag & TIF_QUEUED)
		panic("atm_timeout: double queueing");

	/*
	 * Make sure we delay at least a little bit
	 */
	if (t <= 0)
		t = 1;

	/*
	 * Find out where we belong on the queue; t is reduced by each
	 * predecessor's delta so it ends up relative to tip1.
	 */
	crit_enter();
	for (tip1 = NULL, tip2 = atm_timeq; tip2 && (tip2->ti_ticks <= t);
			tip1 = tip2, tip2 = tip1->ti_next) {
		t -= tip2->ti_ticks;
	}

	/*
	 * Place ourselves on queue and update timer deltas
	 */
	if (tip1 == NULL)
		atm_timeq = tip;
	else
		tip1->ti_next = tip;
	tip->ti_next = tip2;

	/* successor's delta becomes relative to us */
	if (tip2)
		tip2->ti_ticks -= t;

	/*
	 * Setup timer block
	 */
	tip->ti_flag |= TIF_QUEUED;
	tip->ti_ticks = t;
	tip->ti_func = func;

	crit_exit();
	return;
}
/*
 * Cancel a timeout
 *
 * Remove the supplied timer control block from the timer queue.
 *
 * Arguments:
 *	tip	pointer to timer control block
 *
 * Returns:
 *	0	control block successfully dequeued
 *	1	control block not on timer queue
 *
 */
int
atm_untimeout(struct atm_time *tip)
{
	struct atm_time *tip1, *tip2;

	/*
	 * Is control block queued?
	 */
	if ((tip->ti_flag & TIF_QUEUED) == 0)
		return (1);

	/*
	 * Find control block on the queue (tip1 trails as predecessor)
	 */
	crit_enter();
	for (tip1 = NULL, tip2 = atm_timeq; tip2 && (tip2 != tip);
			tip1 = tip2, tip2 = tip1->ti_next) {
	}

	if (tip2 == NULL) {
		crit_exit();
		return (1);
	}

	/*
	 * Remove block from queue and update timer deltas:
	 * the successor absorbs our remaining delta.
	 */
	tip2 = tip->ti_next;
	if (tip1 == NULL)
		atm_timeq = tip2;
	else
		tip1->ti_next = tip2;

	if (tip2)
		tip2->ti_ticks += tip->ti_ticks;

	/*
	 * Reset timer block
	 */
	tip->ti_flag &= ~TIF_QUEUED;

	crit_exit();
	return (0);
}
/*
 * Queue a Stack Call
 *
 * Queues a stack call which must be deferred to the global stack queue.
 * The call parameters are stored in entries which are allocated from the
 * stack queue storage pool.
 *
 * Arguments:
 *	cmd	stack command
 *	func	destination function
 *	token	destination layer's token
 *	cvp	pointer to connection vcc
 *	arg1	command argument
 *	arg2	command argument
 *
 * Returns:
 *	0	call queued
 *	errno	call not queued - reason indicated
 *
 */
int
atm_stack_enq(int cmd, void (*func)(int, void *, int, int), void *token,
	Atm_connvc *cvp, int arg1, int arg2)
{
	struct stackq_entry *sqp;

	crit_enter();

	/*
	 * Get a new queue entry for this call
	 */
	sqp = (struct stackq_entry *)atm_allocate(&atm_stackq_pool);
	if (sqp == NULL) {
		crit_exit();
		return (ENOMEM);
	}

	/*
	 * Fill in new entry
	 */
	sqp->sq_next = NULL;
	sqp->sq_cmd = cmd;
	sqp->sq_func = func;
	sqp->sq_token = token;
	sqp->sq_arg1 = arg1;
	sqp->sq_arg2 = arg2;
	sqp->sq_connvc = cvp;

	/*
	 * Put new entry at end of queue
	 */
	if (atm_stackq_head == NULL)
		atm_stackq_head = sqp;
	else
		atm_stackq_tail->sq_next = sqp;
	atm_stackq_tail = sqp;

	crit_exit();
	return (0);
}
/*
 * Drain the Stack Queue
 *
 * Dequeues and processes entries from the global stack queue.
 * Entries whose connection is currently being processed in the
 * opposite direction (per cvc_upcnt/cvc_downcnt) are skipped and
 * retried on a later pass; passes repeat until a full pass
 * processes nothing.
 *
 * Arguments:
 *	none
 *
 * Returns:
 *	none
 *
 */
void
atm_stack_drain(void)
{
	struct stackq_entry *sqp, *qprev, *qnext;
	int cnt;

	crit_enter();
	/*
	 * Loop thru entire queue until queue is empty
	 * (but panic rather than loop forever)
	 */
	do {
		cnt = 0;	/* entries processed this pass */
		qprev = NULL;
		for (sqp = atm_stackq_head; sqp; ) {

			/*
			 * Got an eligible entry, do STACK_CALL stuff
			 */
			if (sqp->sq_cmd & STKCMD_UP) {
				if (sqp->sq_connvc->cvc_downcnt) {

					/*
					 * Cant process now, skip it
					 */
					qprev = sqp;
					sqp = sqp->sq_next;
					continue;
				}

				/*
				 * OK, dispatch the call
				 */
				sqp->sq_connvc->cvc_upcnt++;
				(*sqp->sq_func)(sqp->sq_cmd,
					sqp->sq_token,
					sqp->sq_arg1,
					sqp->sq_arg2);
				sqp->sq_connvc->cvc_upcnt--;
			} else {
				if (sqp->sq_connvc->cvc_upcnt) {

					/*
					 * Cant process now, skip it
					 */
					qprev = sqp;
					sqp = sqp->sq_next;
					continue;
				}

				/*
				 * OK, dispatch the call
				 */
				sqp->sq_connvc->cvc_downcnt++;
				(*sqp->sq_func)(sqp->sq_cmd,
					sqp->sq_token,
					sqp->sq_arg1,
					sqp->sq_arg2);
				sqp->sq_connvc->cvc_downcnt--;
			}

			/*
			 * Dequeue processed entry and free it
			 */
			cnt++;
			qnext = sqp->sq_next;
			if (qprev)
				qprev->sq_next = qnext;
			else
				atm_stackq_head = qnext;
			if (qnext == NULL)
				atm_stackq_tail = qprev;
			atm_free((caddr_t)sqp);
			sqp = qnext;
		}
	} while (cnt > 0);

	/*
	 * Make sure entire queue was drained
	 */
	if (atm_stackq_head != NULL)
		panic("atm_stack_drain: Queue not emptied");
	crit_exit();
}
/*
 * Process Interrupt Queue
 *
 * Processes entries on the ATM interrupt queue.  This queue is used by
 * device interface drivers in order to schedule events from the driver's
 * lower (interrupt) half to the driver's stack services.
 *
 * The interrupt routines must store the stack processing function to call
 * and a token (typically a driver/stack control block) at the front of the
 * queued buffer.  We assume that the function pointer and token values are
 * both contained (and properly aligned) in the first buffer of the chain.
 *
 * Arguments:
 *	msg	netmsg embedded in the queued mbuf (see note at end)
 *
 * Returns:
 *	none
 *
 */
static void
atm_intr(struct netmsg *msg)
{
	struct mbuf *m = ((struct netmsg_packet *)msg)->nm_packet;
	caddr_t cp;
	atm_intr_func_t func;
	void *token;

	/*
	 * Get function to call and token value from the front of the
	 * first buffer, then strip them off via KB_HEADADJ.
	 */
	KB_DATASTART(m, cp, caddr_t);
	func = *(atm_intr_func_t *)cp;
	cp += sizeof(func);
	token = *(void **)cp;
	KB_HEADADJ(m, -(sizeof(func) + sizeof(token)));
	if (KB_LEN(m) == 0) {
		/* first buffer held only func/token; drop it from the chain */
		KBuffer *m1;
		KB_UNLINKHEAD(m, m1);
		m = m1;
	}

	/*
	 * Call processing function
	 */
	(*func)(token, m);

	/*
	 * Drain any deferred calls
	 */
	STACK_DRAIN();
	/* msg was embedded in the mbuf, do not reply! */
}
886 * Print a pdu buffer chain
888 * Arguments:
889 * m pointer to pdu buffer chain
890 * msg pointer to message header string
892 * Returns:
893 * none
896 void
897 atm_pdu_print(KBuffer *m, char *msg)
899 caddr_t cp;
900 int i;
901 char c = ' ';
903 kprintf("%s:", msg);
904 while (m) {
905 KB_DATASTART(m, cp, caddr_t);
906 kprintf("%cbfr=%p data=%p len=%d: ",
907 c, m, cp, KB_LEN(m));
908 c = '\t';
909 if (atm_print_data) {
910 for (i = 0; i < KB_LEN(m); i++) {
911 kprintf("%2x ", (u_char)*cp++);
913 kprintf("<end_bfr>\n");
914 } else {
915 kprintf("\n");
917 m = KB_NEXT(m);