/*
 *
 * ===================================
 * HARP  |  Host ATM Research Platform
 * ===================================
 *
 *
 * This Host ATM Research Platform ("HARP") file (the "Software") is
 * made available by Network Computing Services, Inc. ("NetworkCS")
 * "AS IS".  NetworkCS does not provide maintenance, improvements or
 * support of any kind.
 *
 * NETWORKCS MAKES NO WARRANTIES OR REPRESENTATIONS, EXPRESS OR IMPLIED,
 * INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE, AS TO ANY ELEMENT OF THE
 * SOFTWARE OR ANY SUPPORT PROVIDED IN CONNECTION WITH THIS SOFTWARE.
 * In no event shall NetworkCS be responsible for any damages, including
 * but not limited to consequential damages, arising from or relating to
 * any use of the Software or related support.
 *
 * Copyright 1994-1998 Network Computing Services, Inc.
 *
 * Copies of this Software may be made, however, the above copyright
 * notice must be reproduced on all copies.
 *
 *	@(#) $FreeBSD: src/sys/dev/hfa/fore_buffer.c,v 1.5 2000/01/15 21:01:04 mks Exp $
 *	@(#) $DragonFly: src/sys/dev/atm/hfa/fore_buffer.c,v 1.6 2008/06/05 18:06:31 swildner Exp $
 */

/*
 * FORE Systems 200-Series Adapter Support
 * ---------------------------------------
 *
 * Buffer Supply queue management
 *
 */

#include "fore_include.h"
/*
 * Local functions
 */
static void	fore_buf_drain (Fore_unit *);
static void	fore_buf_supply_1s (Fore_unit *);
static void	fore_buf_supply_1l (Fore_unit *);
/*
 * Allocate Buffer Supply Queues Data Structures
 *
 * Here we are allocating memory for both Strategy 1 Small and Large
 * structures contiguously.
 *
 * Arguments:
 *	fup	pointer to device unit structure
 *
 * Returns:
 *	0	allocations successful
 *	else	allocation failed
 */
int
fore_buf_allocate(Fore_unit *fup)
{
	caddr_t		memp;

	/*
	 * Allocate non-cacheable memory for buffer supply status words
	 */
	memp = atm_dev_alloc(
			sizeof(Q_status) * (BUF1_SM_QUELEN + BUF1_LG_QUELEN),
			QSTAT_ALIGN, ATM_DEV_NONCACHE);
	if (memp == NULL) {
		return (1);
	}
	fup->fu_buf1s_stat = (Q_status *) memp;
	fup->fu_buf1l_stat = ((Q_status *) memp) + BUF1_SM_QUELEN;
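	/*
	 * The two queues split this single contiguous allocation: the
	 * first BUF1_SM_QUELEN status words serve the Strategy 1 Small
	 * queue, the remaining BUF1_LG_QUELEN words the Large queue.
	 */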
	memp = DMA_GET_ADDR(fup->fu_buf1s_stat,
			sizeof(Q_status) * (BUF1_SM_QUELEN + BUF1_LG_QUELEN),
			QSTAT_ALIGN, ATM_DEV_NONCACHE);
	if (memp == NULL) {
		return (1);
	}
	fup->fu_buf1s_statd = (Q_status *) memp;
	fup->fu_buf1l_statd = ((Q_status *) memp) + BUF1_SM_QUELEN;

	/*
	 * Allocate memory for buffer supply descriptors
	 */
	memp = atm_dev_alloc(sizeof(Buf_descr) *
			((BUF1_SM_QUELEN * BUF1_SM_ENTSIZE) +
			 (BUF1_LG_QUELEN * BUF1_LG_ENTSIZE)),
			BUF_DESCR_ALIGN, 0);
	if (memp == NULL) {
		return (1);
	}
	fup->fu_buf1s_desc = (Buf_descr *) memp;
	fup->fu_buf1l_desc = ((Buf_descr *) memp) +
			(BUF1_SM_QUELEN * BUF1_SM_ENTSIZE);

	memp = DMA_GET_ADDR(fup->fu_buf1s_desc, sizeof(Buf_descr) *
			((BUF1_SM_QUELEN * BUF1_SM_ENTSIZE) +
			 (BUF1_LG_QUELEN * BUF1_LG_ENTSIZE)),
			BUF_DESCR_ALIGN, 0);
	if (memp == NULL) {
		return (1);
	}
	fup->fu_buf1s_descd = (Buf_descr *) memp;
	fup->fu_buf1l_descd = ((Buf_descr *) memp) +
			(BUF1_SM_QUELEN * BUF1_SM_ENTSIZE);

	return (0);
}
/*
 * Buffer Supply Queues Initialization
 *
 * Allocate and initialize the host-resident buffer supply queue structures
 * and then initialize the CP-resident queue structures.
 *
 * Called at interrupt level.
 *
 * Arguments:
 *	fup	pointer to device unit structure
 *
 * Returns:
 *	none
 */
void
fore_buf_initialize(Fore_unit *fup)
{
	Aali		*aap = fup->fu_aali;
	Buf_queue	*cqp;
	H_buf_queue	*hbp;
	Buf_descr	*bdp;
	Buf_descr	*bdp_dma;
	Q_status	*qsp;
	Q_status	*qsp_dma;
	int		i;

	/*
	 * Initialize Strategy 1 Small Queues
	 */

	/*
	 * Point to CP-resident buffer supply queue
	 */
	cqp = (Buf_queue *)(fup->fu_ram + CP_READ(aap->aali_buf1s_q));

	/*
	 * Point to host-resident buffer supply queue structures
	 */
	hbp = fup->fu_buf1s_q;
	qsp = fup->fu_buf1s_stat;
	qsp_dma = fup->fu_buf1s_statd;
	bdp = fup->fu_buf1s_desc;
	bdp_dma = fup->fu_buf1s_descd;

	/*
	 * Loop thru all queue entries and do whatever needs doing
	 */
	for (i = 0; i < BUF1_SM_QUELEN; i++) {

		/*
		 * Set queue status word to free
		 */
		*qsp = QSTAT_FREE;

		/*
		 * Set up host queue entry and link into ring
		 */
		hbp->hbq_cpelem = cqp;
		hbp->hbq_status = qsp;
		hbp->hbq_descr = bdp;
		hbp->hbq_descr_dma = bdp_dma;
		if (i == (BUF1_SM_QUELEN - 1))
			hbp->hbq_next = fup->fu_buf1s_q;
		else
			hbp->hbq_next = hbp + 1;

		/*
		 * Now let the CP into the game
		 */
		cqp->cq_status = (CP_dma) CP_WRITE(qsp_dma);

		/*
		 * Bump all queue pointers
		 */
		hbp++;
		qsp++;
		qsp_dma++;
		bdp += BUF1_SM_ENTSIZE;
		bdp_dma += BUF1_SM_ENTSIZE;
		cqp++;
	}

	/*
	 * Initialize queue pointers
	 */
	fup->fu_buf1s_head = fup->fu_buf1s_tail = fup->fu_buf1s_q;
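	/*
	 * Head and tail both start at the first entry; from here on,
	 * fore_buf_supply_1s() claims free entries at the tail while
	 * fore_buf_drain() retires completed entries from the head.
	 */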

	/*
	 * Initialize Strategy 1 Large Queues
	 */

	/*
	 * Point to CP-resident buffer supply queue
	 */
	cqp = (Buf_queue *)(fup->fu_ram + CP_READ(aap->aali_buf1l_q));

	/*
	 * Point to host-resident buffer supply queue structures
	 */
	hbp = fup->fu_buf1l_q;
	qsp = fup->fu_buf1l_stat;
	qsp_dma = fup->fu_buf1l_statd;
	bdp = fup->fu_buf1l_desc;
	bdp_dma = fup->fu_buf1l_descd;

	/*
	 * Loop thru all queue entries and do whatever needs doing
	 */
	for (i = 0; i < BUF1_LG_QUELEN; i++) {

		/*
		 * Set queue status word to free
		 */
		*qsp = QSTAT_FREE;

		/*
		 * Set up host queue entry and link into ring
		 */
		hbp->hbq_cpelem = cqp;
		hbp->hbq_status = qsp;
		hbp->hbq_descr = bdp;
		hbp->hbq_descr_dma = bdp_dma;
		if (i == (BUF1_LG_QUELEN - 1))
			hbp->hbq_next = fup->fu_buf1l_q;
		else
			hbp->hbq_next = hbp + 1;

		/*
		 * Now let the CP into the game
		 */
		cqp->cq_status = (CP_dma) CP_WRITE(qsp_dma);

		/*
		 * Bump all queue pointers
		 */
		hbp++;
		qsp++;
		qsp_dma++;
		bdp += BUF1_LG_ENTSIZE;
		bdp_dma += BUF1_LG_ENTSIZE;
		cqp++;
	}

	/*
	 * Initialize queue pointers
	 */
	fup->fu_buf1l_head = fup->fu_buf1l_tail = fup->fu_buf1l_q;

	return;
}
/*
 * Supply Buffers to CP
 *
 * This function will resupply the CP with buffers to be used to
 * store incoming data.
 *
 * May be called in interrupt state.
 * Must be called with interrupts locked out.
 *
 * Arguments:
 *	fup	pointer to device unit structure
 *
 * Returns:
 *	none
 */
void
fore_buf_supply(Fore_unit *fup)
{

	/*
	 * First, clean out the supply queues
	 */
	fore_buf_drain(fup);

	/*
	 * Then, supply the buffers for each queue
	 */
	fore_buf_supply_1s(fup);
	fore_buf_supply_1l(fup);

	return;
}
/*
 * Supply Strategy 1 Small Buffers to CP
 *
 * May be called in interrupt state.
 * Must be called with interrupts locked out.
 *
 * Arguments:
 *	fup	pointer to device unit structure
 *
 * Returns:
 *	none
 */
static void
fore_buf_supply_1s(Fore_unit *fup)
{
	H_buf_queue	*hbp;
	Buf_queue	*cqp;
	Buf_descr	*bdp;
	Buf_handle	*bhp;
	KBuffer		*m;
	int		nvcc, nbuf, i;

	/*
	 * Figure out how many buffers we should be giving to the CP.
	 * We're basing this calculation on the current number of open
	 * VCCs thru this device, with certain minimum and maximum values
	 * enforced.  This will then allow us to figure out how many more
	 * buffers we need to supply to the CP.  This will be rounded up
	 * to fill a supply queue entry.
	 */
	nvcc = MAX(fup->fu_open_vcc, BUF_MIN_VCC);
	nbuf = nvcc * 4;
	nbuf = MIN(nbuf, BUF1_SM_CPPOOL);
	nbuf -= fup->fu_buf1s_cnt;
	nbuf = roundup(nbuf, BUF1_SM_ENTSIZE);
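	/*
	 * Illustrative arithmetic only (the real bounds come from the
	 * fore definitions): with 10 open VCCs, and assuming
	 * BUF_MIN_VCC <= 10 and BUF1_SM_CPPOOL >= 40, the target pool
	 * is 10 * 4 = 40 buffers.  If 30 are already with the CP, nbuf
	 * becomes 10, rounded up to a whole entry's worth of
	 * BUF1_SM_ENTSIZE descriptors.
	 */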
	/*
	 * OK, now supply the buffers to the CP
	 */
	while (nbuf > 0) {

		/*
		 * Acquire a supply queue entry
		 */
		hbp = fup->fu_buf1s_tail;
		if (!((*hbp->hbq_status) & QSTAT_FREE))
			break;
		bdp = hbp->hbq_descr;

		/*
		 * Get a buffer for each descriptor in the queue entry
		 */
		for (i = 0; i < BUF1_SM_ENTSIZE; i++, bdp++) {
			caddr_t	cp;

			/*
			 * Get a small buffer
			 */
			KB_ALLOCPKT(m, BUF1_SM_SIZE, KB_F_NOWAIT, KB_T_DATA);
			if (m == NULL) {
				break;
			}
			KB_HEADSET(m, BUF1_SM_DOFF);

			/*
			 * Point to buffer handle structure
			 */
			bhp = (Buf_handle *)((caddr_t)m + BUF1_SM_HOFF);
			bhp->bh_type = BHT_S1_SMALL;

			/*
			 * Setup buffer descriptor
			 */
			bdp->bsd_handle = bhp;
			KB_DATASTART(m, cp, caddr_t);
			bhp->bh_dma = bdp->bsd_buffer = (H_dma) DMA_GET_ADDR(
					cp, BUF1_SM_SIZE, BUF_DATA_ALIGN, 0);
			if (bdp->bsd_buffer == 0) {
				/*
				 * Unable to assign dma address - free up
				 * this descriptor's buffer
				 */
				fup->fu_stats->st_drv.drv_bf_segdma++;
				KB_FREEALL(m);
				break;
			}

			/*
			 * All set, so queue buffer (handle)
			 */
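			/*
			 * fu_buf1s_bq tracks every buffer currently lent
			 * to the CP; fore_buf_free() walks this list to
			 * reclaim still-outstanding buffers when the unit
			 * is torn down.
			 */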
			ENQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1s_bq);
		}

		/*
		 * If we were not able to fill all the descriptors for
		 * an entry, free up what's been partially built
		 */
		if (i != BUF1_SM_ENTSIZE) {
			caddr_t	cp;

			/*
			 * Clean up each used descriptor
			 */
			for (bdp = hbp->hbq_descr; i; i--, bdp++) {
				bhp = bdp->bsd_handle;

				DEQUEUE(bhp, Buf_handle, bh_qelem,
					fup->fu_buf1s_bq);

				m = (KBuffer *)
					((caddr_t)bhp - BUF1_SM_HOFF);
				KB_DATASTART(m, cp, caddr_t);
				DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_SM_SIZE, 0);
				KB_FREEALL(m);
			}
			break;
		}
		/*
		 * Finally, we've got an entry ready for the CP.
		 * So claim the host queue entry and setup the CP-resident
		 * queue entry.  The CP will (potentially) grab the supplied
		 * buffers when the descriptor pointer is set.
		 */
		fup->fu_buf1s_tail = hbp->hbq_next;
		(*hbp->hbq_status) = QSTAT_PENDING;
		cqp = hbp->hbq_cpelem;
		cqp->cq_descr = (CP_dma) CP_WRITE((u_long)hbp->hbq_descr_dma);

		/*
		 * Update counters, etc for supplied buffers
		 */
		fup->fu_buf1s_cnt += BUF1_SM_ENTSIZE;
		nbuf -= BUF1_SM_ENTSIZE;
	}

	return;
}
/*
 * Supply Strategy 1 Large Buffers to CP
 *
 * May be called in interrupt state.
 * Must be called with interrupts locked out.
 *
 * Arguments:
 *	fup	pointer to device unit structure
 *
 * Returns:
 *	none
 */
static void
fore_buf_supply_1l(Fore_unit *fup)
{
	H_buf_queue	*hbp;
	Buf_queue	*cqp;
	Buf_descr	*bdp;
	Buf_handle	*bhp;
	KBuffer		*m;
	int		nvcc, nbuf, i;

	/*
	 * Figure out how many buffers we should be giving to the CP.
	 * We're basing this calculation on the current number of open
	 * VCCs thru this device, with certain minimum and maximum values
	 * enforced.  This will then allow us to figure out how many more
	 * buffers we need to supply to the CP.  This will be rounded up
	 * to fill a supply queue entry.
	 */
	nvcc = MAX(fup->fu_open_vcc, BUF_MIN_VCC);
	nbuf = nvcc * 4 * RECV_MAX_SEGS;
	nbuf = MIN(nbuf, BUF1_LG_CPPOOL);
	nbuf -= fup->fu_buf1l_cnt;
	nbuf = roundup(nbuf, BUF1_LG_ENTSIZE);
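	/*
	 * Unlike the small-buffer case, the target here scales with
	 * RECV_MAX_SEGS, presumably because a single received PDU may
	 * span up to that many large buffer segments; otherwise the
	 * computation mirrors fore_buf_supply_1s() above.
	 */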
	/*
	 * OK, now supply the buffers to the CP
	 */
	while (nbuf > 0) {

		/*
		 * Acquire a supply queue entry
		 */
		hbp = fup->fu_buf1l_tail;
		if (!((*hbp->hbq_status) & QSTAT_FREE))
			break;
		bdp = hbp->hbq_descr;

		/*
		 * Get a buffer for each descriptor in the queue entry
		 */
		for (i = 0; i < BUF1_LG_ENTSIZE; i++, bdp++) {
			caddr_t	cp;

			/*
			 * Get a cluster buffer
			 */
			KB_ALLOCEXT(m, BUF1_LG_SIZE, KB_F_NOWAIT, KB_T_DATA);
			if (m == NULL) {
				break;
			}
			KB_HEADSET(m, BUF1_LG_DOFF);

			/*
			 * Point to buffer handle structure
			 */
			bhp = (Buf_handle *)((caddr_t)m + BUF1_LG_HOFF);
			bhp->bh_type = BHT_S1_LARGE;

			/*
			 * Setup buffer descriptor
			 */
			bdp->bsd_handle = bhp;
			KB_DATASTART(m, cp, caddr_t);
			bhp->bh_dma = bdp->bsd_buffer = (H_dma) DMA_GET_ADDR(
					cp, BUF1_LG_SIZE, BUF_DATA_ALIGN, 0);
			if (bdp->bsd_buffer == 0) {
				/*
				 * Unable to assign dma address - free up
				 * this descriptor's buffer
				 */
				fup->fu_stats->st_drv.drv_bf_segdma++;
				KB_FREEALL(m);
				break;
			}

			/*
			 * All set, so queue buffer (handle)
			 */
			ENQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1l_bq);
		}

		/*
		 * If we were not able to fill all the descriptors for
		 * an entry, free up what's been partially built
		 */
		if (i != BUF1_LG_ENTSIZE) {
			caddr_t	cp;

			/*
			 * Clean up each used descriptor
			 */
			for (bdp = hbp->hbq_descr; i; i--, bdp++) {
				bhp = bdp->bsd_handle;

				DEQUEUE(bhp, Buf_handle, bh_qelem,
					fup->fu_buf1l_bq);

				m = (KBuffer *)
					((caddr_t)bhp - BUF1_LG_HOFF);
				KB_DATASTART(m, cp, caddr_t);
				DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_LG_SIZE, 0);
				KB_FREEALL(m);
			}
			break;
		}

		/*
		 * Finally, we've got an entry ready for the CP.
		 * So claim the host queue entry and setup the CP-resident
		 * queue entry.  The CP will (potentially) grab the supplied
		 * buffers when the descriptor pointer is set.
		 */
		fup->fu_buf1l_tail = hbp->hbq_next;
		(*hbp->hbq_status) = QSTAT_PENDING;
		cqp = hbp->hbq_cpelem;
		cqp->cq_descr = (CP_dma) CP_WRITE((u_long)hbp->hbq_descr_dma);

		/*
		 * Update counters, etc for supplied buffers
		 */
		fup->fu_buf1l_cnt += BUF1_LG_ENTSIZE;
		nbuf -= BUF1_LG_ENTSIZE;
	}

	return;
}
/*
 * Drain Buffer Supply Queues
 *
 * This function will free all completed entries at the head of each
 * buffer supply queue.  Since we consider the CP to "own" the buffers
 * once we put them on a supply queue and since a completed supply queue
 * entry is only telling us that the CP has accepted the buffers that we
 * gave to it, there's not much to do here.
 *
 * May be called in interrupt state.
 * Must be called with interrupts locked out.
 *
 * Arguments:
 *	fup	pointer to device unit structure
 *
 * Returns:
 *	none
 */
static void
fore_buf_drain(Fore_unit *fup)
{
	H_buf_queue	*hbp;

	/*
	 * Drain Strategy 1 Small Queue
	 */

	/*
	 * Process each completed entry
	 */
	while (*fup->fu_buf1s_head->hbq_status & QSTAT_COMPLETED) {

		hbp = fup->fu_buf1s_head;

		if (*hbp->hbq_status & QSTAT_ERROR) {
			/*
			 * XXX - what does this mean???
			 */
			log(LOG_ERR, "fore_buf_drain: buf1s queue error\n");
		}
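		/*
		 * Note that the entry is reclaimed below even when the
		 * CP flags it in error: the buffers already belong to
		 * the CP, so there is nothing further to unwind here.
		 */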
		/*
		 * Mark this entry free for use and bump head pointer
		 * to the next entry in the queue
		 */
		*hbp->hbq_status = QSTAT_FREE;
		fup->fu_buf1s_head = hbp->hbq_next;
	}

	/*
	 * Drain Strategy 1 Large Queue
	 */

	/*
	 * Process each completed entry
	 */
	while (*fup->fu_buf1l_head->hbq_status & QSTAT_COMPLETED) {

		hbp = fup->fu_buf1l_head;

		if (*hbp->hbq_status & QSTAT_ERROR) {
			/*
			 * XXX - what does this mean???
			 */
			log(LOG_ERR, "fore_buf_drain: buf1l queue error\n");
		}

		/*
		 * Mark this entry free for use and bump head pointer
		 * to the next entry in the queue
		 */
		*hbp->hbq_status = QSTAT_FREE;
		fup->fu_buf1l_head = hbp->hbq_next;
	}

	return;
}
/*
 * Free Buffer Supply Queue Data Structures
 *
 * Arguments:
 *	fup	pointer to device unit structure
 *
 * Returns:
 *	none
 */
void
fore_buf_free(Fore_unit *fup)
{
	Buf_handle	*bhp;
	KBuffer		*m;

	/*
	 * Free any previously supplied and not returned buffers
	 */
	if (fup->fu_flags & CUF_INITED) {

		/*
		 * Run through Strategy 1 Small queue
		 */
		while ((bhp = Q_HEAD(fup->fu_buf1s_bq, Buf_handle)) != NULL) {
			caddr_t	cp;

			/*
			 * Back off to buffer
			 */
			m = (KBuffer *)((caddr_t)bhp - BUF1_SM_HOFF);

			/*
			 * Dequeue handle and free buffer
			 */
			DEQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1s_bq);

			KB_DATASTART(m, cp, caddr_t);
			DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_SM_SIZE, 0);

			KB_FREEALL(m);
		}

		/*
		 * Run through Strategy 1 Large queue
		 */
		while ((bhp = Q_HEAD(fup->fu_buf1l_bq, Buf_handle)) != NULL) {
			caddr_t	cp;

			/*
			 * Back off to buffer
			 */
			m = (KBuffer *)((caddr_t)bhp - BUF1_LG_HOFF);

			/*
			 * Dequeue handle and free buffer
			 */
			DEQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1l_bq);

			KB_DATASTART(m, cp, caddr_t);
			DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_LG_SIZE, 0);

			KB_FREEALL(m);
		}
	}
	/*
	 * Free the status words
	 */
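	/*
	 * The Small and Large status words share one contiguous
	 * allocation (see fore_buf_allocate()), so freeing through the
	 * Small pointers releases both; the Large pointers are simply
	 * cleared.  The descriptor area below follows the same pattern.
	 */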
	if (fup->fu_buf1s_stat) {
		if (fup->fu_buf1s_statd) {
			DMA_FREE_ADDR(fup->fu_buf1s_stat, fup->fu_buf1s_statd,
				sizeof(Q_status) *
				(BUF1_SM_QUELEN + BUF1_LG_QUELEN),
				ATM_DEV_NONCACHE);
		}
		atm_dev_free((volatile void *)fup->fu_buf1s_stat);
		fup->fu_buf1s_stat = NULL;
		fup->fu_buf1s_statd = NULL;
		fup->fu_buf1l_stat = NULL;
		fup->fu_buf1l_statd = NULL;
	}
	/*
	 * Free the buffer supply descriptors
	 */
	if (fup->fu_buf1s_desc) {
		if (fup->fu_buf1s_descd) {
			DMA_FREE_ADDR(fup->fu_buf1s_desc, fup->fu_buf1s_descd,
				sizeof(Buf_descr) *
				((BUF1_SM_QUELEN * BUF1_SM_ENTSIZE) +
				 (BUF1_LG_QUELEN * BUF1_LG_ENTSIZE)),
				0);
		}
		atm_dev_free(fup->fu_buf1s_desc);
		fup->fu_buf1s_desc = NULL;
		fup->fu_buf1s_descd = NULL;
		fup->fu_buf1l_desc = NULL;
		fup->fu_buf1l_descd = NULL;
	}

	return;
}