Nuke token ring support. This also means one blob less in DragonFly.
[dragonfly.git] / sys / dev / netif / pdq_layer / pdq.c
blob0ebb4b499306cf3188d706c84421f80335d1a666
1 /*-
2 * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the author may not be used to endorse or promote products
11 * derived from this software withough specific prior written permission
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 * $FreeBSD: src/sys/dev/pdq/pdq.c,v 1.5 1999/08/28 00:42:19 peter Exp $
25 * $DragonFly: src/sys/dev/netif/pdq_layer/Attic/pdq.c,v 1.8 2007/05/13 18:33:57 swildner Exp $
30 * DEC PDQ FDDI Controller O/S independent code
32 * This module should work any PDQ based board. Note that changes for
33 * MIPS and Alpha architectures (or any other architecture which requires
34 * a flushing of memory or write buffers and/or has incoherent caches)
35 * have yet to be made.
37 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
38 * flushing of the write buffers.
41 #define PDQ_HWSUPPORT /* for pdq.h */
43 #if defined(__DragonFly__) || defined(__FreeBSD__)
44 #include "pdqvar.h"
45 #include "pdqreg.h"
46 #else
47 #include "pdqvar.h"
48 #include "pdqreg.h"
49 #endif
51 #include <sys/socket.h>
52 #include <net/if.h>
53 #include <net/ifq_var.h>
/* Round n up to the next multiple of x; x must be a power of two. */
55 #define PDQ_ROUNDUP(n, x) (((n) + ((x) - 1)) & ~((x) - 1))
/* Alignment (bytes) required for command/response DMA buffers. */
56 #define PDQ_CMD_RX_ALIGNMENT 16
/* Debug printf: expands to a real kprintf only in test/verbose builds. */
58 #if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
59 #define PDQ_PRINTF(x) kprintf x
60 #else
61 #define PDQ_PRINTF(x) do { } while (0)
62 #endif
/* Human-readable names for the adapter halt reason codes. */
64 static const char * const pdq_halt_codes[] = {
65 "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
66 "Software Fault", "Hardware Fault", "PC Trace Path Test",
67 "DMA Error", "Image CRC Error", "Adapter Processer Error"
/* Human-readable names for the adapter states (see pdq_hwreset/pdq_stop). */
70 static const char * const pdq_adapter_states[] = {
71 "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
72 "Link Available", "Link Unavailable", "Halted", "Ring Member"
76 * The following are used in conjunction with
77 * unsolicited events
/* Entity names, indexed by event_entity (station / link / phy port). */
79 static const char * const pdq_entities[] = {
80 "Station", "Link", "Phy Port"
/* Event and argument names for PDQ_ENTITY station events. */
83 static const char * const pdq_station_events[] = {
84 "Trace Received"
87 static const char * const pdq_station_arguments[] = {
88 "Reason"
/* Link event names; NULL slots are event codes with no printable name. */
91 static const char * const pdq_link_events[] = {
92 "Transmit Underrun",
93 "Transmit Failed",
94 "Block Check Error (CRC)",
95 "Frame Status Error",
96 "PDU Length Error",
97 NULL,
98 NULL,
99 "Receive Data Overrun",
100 NULL,
101 "No User Buffer",
102 "Ring Initialization Initiated",
103 "Ring Initialization Received",
104 "Ring Beacon Initiated",
105 "Duplicate Address Failure",
106 "Duplicate Token Detected",
107 "Ring Purger Error",
108 "FCI Strip Error",
109 "Trace Initiated",
110 "Directed Beacon Received",
113 static const char * const pdq_link_arguments[] = {
114 "Reason",
115 "Data Link Header",
116 "Source",
117 "Upstream Neighbor"
/* Event and argument names for PHY port events. */
120 static const char * const pdq_phy_events[] = {
121 "LEM Error Monitor Reject",
122 "Elasticy Buffer Error",
123 "Link Confidence Test Reject"
126 static const char * const pdq_phy_arguments[] = {
127 "Direction"
/* Per-entity lookup tables, indexed first by event_entity. */
130 static const char * const * const pdq_event_arguments[] = {
131 pdq_station_arguments,
132 pdq_link_arguments,
133 pdq_phy_arguments
136 static const char * const * const pdq_event_codes[] = {
137 pdq_station_events,
138 pdq_link_events,
139 pdq_phy_events
/* FDDI station type names (single/dual attach, concentrators). */
142 static const char * const pdq_station_types[] = {
143 "SAS", "DAC", "SAC", "NAC", "DAS"
/* SMT version names, indexed by smt_version_id (bounds-checked by caller). */
146 static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };
/* One-character PHY type codes, indexed by phy_type. */
148 static const char pdq_phy_types[] = "ABSM";
/*
 * PMD type names.  The raw pmd_type value encodes family and member as
 * (value / 100, value % 100); see pdq_print_fddi_chars.
 */
150 static const char * const pdq_pmd_types0[] = {
151 "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
152 "ANSI Sonet"
155 static const char * const pdq_pmd_types100[] = {
156 "Low Power", "Thin Wire", "Shielded Twisted Pair",
157 "Unshielded Twisted Pair"
160 static const char * const * const pdq_pmd_types[] = {
161 pdq_pmd_types0, pdq_pmd_types100
/* Board model names, indexed by pdq->pdq_type. */
164 static const char * const pdq_descriptions[] = {
165 "DEFPA PCI",
166 "DEFEA EISA",
167 "DEFTA TC",
168 "DEFAA Futurebus",
169 "DEFQA Q-bus",
/*
 * Print the adapter identification banner: controller model and station
 * type, the FDDI MAC address, firmware/hardware revisions, the SMT
 * version (when the id is covered by pdq_smt_versions), and the PHY/PMD
 * type of each port (port B is printed only for DAS stations).
 */
172 static void
173 pdq_print_fddi_chars(
174 pdq_t *pdq,
175 const pdq_response_status_chars_get_t *rsp)
177 const char hexchars[] = "0123456789abcdef";
179 kprintf(
180 #if !defined(__bsdi__) && !defined(__NetBSD__)
181 PDQ_OS_PREFIX
182 #else
183 ": "
184 #endif
185 "DEC %s FDDI %s Controller\n",
186 #if !defined(__bsdi__) && !defined(__NetBSD__)
187 PDQ_OS_PREFIX_ARGS,
188 #endif
189 pdq_descriptions[pdq->pdq_type],
190 pdq_station_types[rsp->status_chars_get.station_type]);
/* The MAC address is formatted one nibble at a time via hexchars. */
192 kprintf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
193 PDQ_OS_PREFIX_ARGS,
194 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
195 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
196 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
197 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
198 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
199 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
200 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
201 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
202 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
203 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
204 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
205 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
206 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
207 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
208 rsp->status_chars_get.module_rev.fwrev_bytes[0]);
/* Only SMT version ids inside pdq_smt_versions get a printable name. */
210 if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
211 kprintf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
/* pmd_type encodes (family / 100, member % 100) into pdq_pmd_types. */
214 kprintf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
215 PDQ_OS_PREFIX_ARGS,
216 rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
217 pdq_phy_types[rsp->status_chars_get.phy_type[0]],
218 pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);
/* Dual-attach stations have a second port to report. */
220 if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
221 kprintf(", FDDI Port[B] = %c (PMD = %s)",
222 pdq_phy_types[rsp->status_chars_get.phy_type[1]],
223 pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);
225 kprintf("\n");
/*
 * Fill in the generic PDQ CSR map.  Each register lives at a fixed slot
 * number times the bus-specific register stride (csrsize).  Note that
 * slot 9 (between type_2_producer and cmd_response_producer) is skipped.
 */
228 static void
229 pdq_init_csrs(
230 pdq_csrs_t *csrs,
231 pdq_bus_t bus,
232 pdq_bus_memaddr_t csr_base,
233 size_t csrsize)
235 csrs->csr_bus = bus;
236 csrs->csr_base = csr_base;
237 csrs->csr_port_reset = PDQ_CSR_OFFSET(csr_base, 0 * csrsize);
238 csrs->csr_host_data = PDQ_CSR_OFFSET(csr_base, 1 * csrsize);
239 csrs->csr_port_control = PDQ_CSR_OFFSET(csr_base, 2 * csrsize);
240 csrs->csr_port_data_a = PDQ_CSR_OFFSET(csr_base, 3 * csrsize);
241 csrs->csr_port_data_b = PDQ_CSR_OFFSET(csr_base, 4 * csrsize);
242 csrs->csr_port_status = PDQ_CSR_OFFSET(csr_base, 5 * csrsize);
243 csrs->csr_host_int_type_0 = PDQ_CSR_OFFSET(csr_base, 6 * csrsize);
244 csrs->csr_host_int_enable = PDQ_CSR_OFFSET(csr_base, 7 * csrsize);
245 csrs->csr_type_2_producer = PDQ_CSR_OFFSET(csr_base, 8 * csrsize);
246 csrs->csr_cmd_response_producer = PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
247 csrs->csr_cmd_request_producer = PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
248 csrs->csr_host_smt_producer = PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
249 csrs->csr_unsolicited_producer = PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
/*
 * Fill in the PCI-specific (PFI) CSR map.  These registers follow the
 * generic set, starting at slot 16.
 */
252 static void
253 pdq_init_pci_csrs(
254 pdq_pci_csrs_t *csrs,
255 pdq_bus_t bus,
256 pdq_bus_memaddr_t csr_base,
257 size_t csrsize)
259 csrs->csr_bus = bus;
260 csrs->csr_base = csr_base;
261 csrs->csr_pfi_mode_control = PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
262 csrs->csr_pfi_status = PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
263 csrs->csr_fifo_write = PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
264 csrs->csr_fifo_read = PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
/* Dequeue and free every data buffer remaining on the queue. */
267 static void
268 pdq_flush_databuf_queue(
269 pdq_databuf_queue_t *q)
271 PDQ_OS_DATABUF_T *pdu;
272 for (;;) {
273 PDQ_OS_DATABUF_DEQUEUE(q, pdu);
274 if (pdu == NULL)
275 return;
276 PDQ_OS_DATABUF_FREE(pdu);
/*
 * Issue a port-control command and busy-wait (bounded at 33,000,000
 * spins) for the adapter to raise CSR_CMD_DONE.  Returns PDQ_TRUE on
 * success, PDQ_FALSE if the adapter reported PCTL_CMD_ERROR or never
 * completed.  NOTE(review): PDQ_PCTL_CMD_ERROR is written along with
 * the command and re-tested after completion -- presumably the adapter
 * clears it on success; confirm against the PDQ port specification.
 */
280 static pdq_boolean_t
281 pdq_do_port_control(
282 const pdq_csrs_t * const csrs,
283 pdq_uint32_t cmd)
285 int cnt = 0;
/* Acknowledge any stale CMD_DONE before starting the new command. */
286 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
287 PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
288 while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
289 cnt++;
290 PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
291 if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
292 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
293 return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
295 /* adapter failure */
296 PDQ_ASSERT(0);
297 return PDQ_FALSE;
/*
 * Read the adapter's 48-bit hardware address ("MLA" = My Long Address,
 * FDDI-speak for the MAC address) as two 32-bit words selected via
 * csr_port_data_a, low-order bytes first.
 */
300 static void
301 pdq_read_mla(
302 const pdq_csrs_t * const csrs,
303 pdq_lanaddr_t *hwaddr)
305 pdq_uint32_t data;
/* Word 0: address bytes 0-3. */
307 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
308 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
309 data = PDQ_CSR_READ(csrs, csr_host_data);
311 hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
312 hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
313 hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
314 hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;
/* Word 1: address bytes 4-5 in the low half. */
316 PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
317 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
318 data = PDQ_CSR_READ(csrs, csr_host_data);
320 hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
321 hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
/*
 * Read the 4-byte firmware revision string; the bytes arrive in the
 * 32-bit host-data word most-significant-character first, so they are
 * stored in reverse shift order.
 */
324 static void
325 pdq_read_fwrev(
326 const pdq_csrs_t * const csrs,
327 pdq_fwrev_t *fwrev)
329 pdq_uint32_t data;
331 pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
332 data = PDQ_CSR_READ(csrs, csr_host_data);
334 fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
335 fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
336 fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
337 fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
/*
 * Copy the adapter's error log into *log_entry one 32-bit word at a
 * time, stopping at sizeof(*log_entry).  Returns PDQ_TRUE if at least
 * one word was read.  NOTE(review): after PDQ_PCTL_ERROR_LOG_START the
 * loop advances with PDQ_PCTL_FW_REV_READ -- confirm this is the
 * documented sub-command for stepping through log words.
 */
340 static pdq_boolean_t
341 pdq_read_error_log(
342 pdq_t *pdq,
343 pdq_response_error_log_get_t *log_entry)
345 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
346 pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;
348 pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);
350 while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
351 *ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
352 if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
353 break;
355 return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
/* Query the PDQ chip revision via the REV_GET sub-command. */
358 static pdq_chip_rev_t
359 pdq_read_chiprev(
360 const pdq_csrs_t * const csrs)
362 pdq_uint32_t data;
364 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
365 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
366 data = PDQ_CSR_READ(csrs, csr_host_data);
368 return (pdq_chip_rev_t) data;
/*
 * Request/response buffer sizes and display names for each adapter
 * command, indexed by pdq_cmd_code_t.  The counters commands are
 * compiled out but keep {0, 0, name} placeholder rows so that the
 * opcode indexing stays aligned; the commands from Error Log Clear
 * onward are compiled out entirely.
 */
371 static const struct {
372 size_t cmd_len;
373 size_t rsp_len;
374 const char *cmd_name;
375 } pdq_cmd_info[] = {
376 { sizeof(pdq_cmd_generic_t), /* 0 - PDQC_START */
377 sizeof(pdq_response_generic_t),
378 "Start"
380 { sizeof(pdq_cmd_filter_set_t), /* 1 - PDQC_FILTER_SET */
381 sizeof(pdq_response_generic_t),
382 "Filter Set"
384 { sizeof(pdq_cmd_generic_t), /* 2 - PDQC_FILTER_GET */
385 sizeof(pdq_response_filter_get_t),
386 "Filter Get"
388 { sizeof(pdq_cmd_chars_set_t), /* 3 - PDQC_CHARS_SET */
389 sizeof(pdq_response_generic_t),
390 "Chars Set"
392 { sizeof(pdq_cmd_generic_t), /* 4 - PDQC_STATUS_CHARS_GET */
393 sizeof(pdq_response_status_chars_get_t),
394 "Status Chars Get"
396 #if 0
397 { sizeof(pdq_cmd_generic_t), /* 5 - PDQC_COUNTERS_GET */
398 sizeof(pdq_response_counters_get_t),
399 "Counters Get"
401 { sizeof(pdq_cmd_counters_set_t), /* 6 - PDQC_COUNTERS_SET */
402 sizeof(pdq_response_generic_t),
403 "Counters Set"
405 #else
406 { 0, 0, "Counters Get" },
407 { 0, 0, "Counters Set" },
408 #endif
409 { sizeof(pdq_cmd_addr_filter_set_t), /* 7 - PDQC_ADDR_FILTER_SET */
410 sizeof(pdq_response_generic_t),
411 "Addr Filter Set"
413 { sizeof(pdq_cmd_generic_t), /* 8 - PDQC_ADDR_FILTER_GET */
414 sizeof(pdq_response_addr_filter_get_t),
415 "Addr Filter Get"
417 #if 0
418 { sizeof(pdq_cmd_generic_t), /* 9 - PDQC_ERROR_LOG_CLEAR */
419 sizeof(pdq_response_generic_t),
420 "Error Log Clear"
422 { sizeof(pdq_cmd_generic_t), /* 10 - PDQC_ERROR_LOG_SET */
423 sizeof(pdq_response_generic_t),
424 "Error Log Set"
426 { sizeof(pdq_cmd_generic_t), /* 11 - PDQC_FDDI_MIB_GET */
427 sizeof(pdq_response_generic_t),
428 "FDDI MIB Get"
430 { sizeof(pdq_cmd_generic_t), /* 12 - PDQC_DEC_EXT_MIB_GET */
431 sizeof(pdq_response_generic_t),
432 "DEC Ext MIB Get"
434 { sizeof(pdq_cmd_generic_t), /* 13 - PDQC_DEC_SPECIFIC_GET */
435 sizeof(pdq_response_generic_t),
436 "DEC Specific Get"
438 { sizeof(pdq_cmd_generic_t), /* 14 - PDQC_SNMP_SET */
439 sizeof(pdq_response_generic_t),
440 "SNMP Set"
442 { 0, 0, "N/A" },
443 { sizeof(pdq_cmd_generic_t), /* 16 - PDQC_SMT_MIB_GET */
444 sizeof(pdq_response_generic_t),
445 "SMT MIB Get"
447 { sizeof(pdq_cmd_generic_t), /* 17 - PDQC_SMT_MIB_SET */
448 sizeof(pdq_response_generic_t),
449 "SMT MIB Set",
451 #endif
/*
 * Queue the next pending adapter command, if any.  Only one command is
 * in flight at a time: the highest pending opcode is selected, a
 * request and a response descriptor are produced into the command
 * rings, the command buffer is built, and the producer indexes are
 * written to the adapter CSRs.
 */
454 static void
455 pdq_queue_commands(
456 pdq_t *pdq)
458 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
459 pdq_command_info_t * const ci = &pdq->pdq_command_info;
460 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
461 pdq_cmd_code_t op;
462 pdq_uint32_t cmdlen, rsplen, mask;
465 * If there are commands or responses active or there aren't
466 * any pending commands, then don't queue any more.
468 if (ci->ci_command_active || ci->ci_pending_commands == 0)
469 return;
472 * Determine which command needs to be queued.
/* Scan the pending bitmask from the highest opcode downward. */
474 op = PDQC_SMT_MIB_SET;
475 for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
476 op = (pdq_cmd_code_t) ((int) op - 1);
478 * Obtain the sizes needed for the command and response.
479 * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
480 * always properly aligned.
482 cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
483 rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
/* One buffer serves both directions, so take the larger of the two. */
484 if (cmdlen < rsplen)
485 cmdlen = rsplen;
487 * Since only one command at a time will be queued, there will always
488 * be enough space.
492 * Obtain and fill in the descriptor for the command (descriptor is
493 * pre-initialized)
495 dbp->pdqdb_command_requests[ci->ci_request_producer].txd_seg_len = cmdlen;
496 PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
499 * Obtain and fill in the descriptor for the response (descriptor is
500 * pre-initialized)
502 dbp->pdqdb_command_responses[ci->ci_response_producer].rxd_seg_len_hi = cmdlen / 16;
503 PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
506 * Clear the command area, set the opcode, and the command from the pending
507 * mask.
510 PDQ_OS_MEMZERO(ci->ci_bufstart, cmdlen);
511 *(pdq_cmd_code_t *) ci->ci_bufstart = op;
512 ci->ci_pending_commands &= ~mask;
515 * Fill in the command area, if needed.
517 switch (op) {
518 case PDQC_FILTER_SET: {
/* Filter states are derived from the driver's PROMISC/ALLMULTI/SMT flags. */
519 pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_bufstart;
520 unsigned idx = 0;
521 filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
522 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
523 idx++;
524 filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
525 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
526 idx++;
527 filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
528 filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
529 idx++;
530 filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
531 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
532 idx++;
533 filter_set->filter_set_items[idx].item_code = PDQI_EOL;
534 break;
536 case PDQC_ADDR_FILTER_SET: {
/* First entry is the broadcast address; the OS fills in the rest. */
537 pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_bufstart;
538 pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
539 addr->lanaddr_bytes[0] = 0xFF;
540 addr->lanaddr_bytes[1] = 0xFF;
541 addr->lanaddr_bytes[2] = 0xFF;
542 addr->lanaddr_bytes[3] = 0xFF;
543 addr->lanaddr_bytes[4] = 0xFF;
544 addr->lanaddr_bytes[5] = 0xFF;
545 addr++;
546 pdq_os_addr_fill(pdq, addr, 61);
547 break;
549 default: { /* to make gcc happy */
550 break;
554 * At this point the command is done. All that needs to be done is to
555 * produce it to the PDQ.
557 PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
558 pdq_cmd_info[op].cmd_name));
560 ci->ci_command_active++;
/* Producer CSRs carry the producer index in the low byte, completion << 8. */
561 PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
562 PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
/*
 * Consume a completed adapter command/response pair.  The command and
 * response rings advance in lockstep: once the consumer block shows the
 * response consumed, the command must be too.  Queues the next pending
 * command, if any; otherwise just updates the producer CSRs.
 */
565 static void
566 pdq_process_command_responses(
567 pdq_t * const pdq)
569 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
570 pdq_command_info_t * const ci = &pdq->pdq_command_info;
571 volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
572 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
573 const pdq_response_generic_t *rspgen;
576 * We have to process the command and response in tandem so
577 * just wait for the response to be consumed. If it has been
578 * consumed then the command must have been as well.
581 if (cbp->pdqcb_command_response == ci->ci_response_completion)
582 return;
584 PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);
/* The response lands in the same buffer the command was built in. */
586 rspgen = (const pdq_response_generic_t *) ci->ci_bufstart;
587 PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
588 PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d)\n",
589 pdq_cmd_info[rspgen->generic_op].cmd_name,
590 rspgen->generic_status));
/* A one-shot PRINTCHARS flag triggers the identification banner. */
592 if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
593 pdq->pdq_flags &= ~PDQ_PRINTCHARS;
594 pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
597 PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
598 PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
599 ci->ci_command_active = 0;
601 if (ci->ci_pending_commands != 0) {
602 pdq_queue_commands(pdq);
603 } else {
604 PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
605 ci->ci_response_producer | (ci->ci_response_completion << 8));
606 PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
607 ci->ci_request_producer | (ci->ci_request_completion << 8));
612 * This following routine processes unsolicited events.
613 * In addition, it also fills the unsolicited queue with
614 * event buffers so it can be used to initialize the queue
615 * as well.
617 static void
618 pdq_process_unsolicited_events(
619 pdq_t *pdq)
621 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
622 pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
623 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
624 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
625 const pdq_unsolicited_event_t *event;
626 pdq_rxdesc_t *rxd;
629 * Process each unsolicited event (if any).
632 while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
633 rxd = &dbp->pdqdb_unsolicited_events[ui->ui_completion];
634 event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
636 switch (event->event_type) {
637 case PDQ_UNSOLICITED_EVENT: {
/* Event text is looked up via the entity/code string tables above. */
638 kprintf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
639 PDQ_OS_PREFIX_ARGS,
640 pdq_entities[event->event_entity],
641 pdq_event_codes[event->event_entity][event->event_code.value]);
642 if (event->event_entity == PDQ_ENTITY_PHY_PORT)
643 kprintf("[%d]", event->event_index);
644 kprintf("\n");
645 break;
/* Counter events are consumed silently. */
647 case PDQ_UNSOLICITED_COUNTERS: {
648 break;
651 PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
652 ui->ui_free++;
656 * Now give back the event buffers back to the PDQ.
658 PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
659 ui->ui_free = 0;
661 PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
662 ui->ui_producer | (ui->ui_completion << 8));
/*
 * Process received frames on a receive ring (used for both the normal
 * receive ring and the host SMT ring).  For each completed multi-segment
 * entry: validate the frame-control byte and length, replace the
 * consumed data buffers with fresh ones, hand the frame chain to the OS
 * via pdq_os_receive_pdu(), and finally refill the ring down to
 * rx_target.  Bad frames are discarded and their ring slots recycled in
 * place.
 */
665 static void
666 pdq_process_received_data(
667 pdq_t *pdq,
668 pdq_rx_info_t *rx,
669 pdq_rxdesc_t *receives,
670 pdq_uint32_t completion_goal,
671 pdq_uint32_t ring_mask)
673 pdq_uint32_t completion = rx->rx_completion;
674 pdq_uint32_t producer = rx->rx_producer;
675 PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
676 pdq_rxdesc_t *rxd;
677 pdq_uint32_t idx;
679 while (completion != completion_goal) {
680 PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
681 pdq_uint8_t *dataptr;
682 pdq_uint32_t fc, datalen, pdulen, segcnt;
683 pdq_rxstatus_t status;
685 fpdu = lpdu = buffers[completion];
686 PDQ_ASSERT(fpdu != NULL);
/* The receive status word is prepended to the frame data. */
688 dataptr = PDQ_OS_DATABUF_PTR(fpdu);
689 status = *(pdq_rxstatus_t *) dataptr;
/* NOTE(review): bit 0x200000 appears to flag a receive error and the
 * low 13 bits the data length -- confirm against pdqreg.h. */
690 if ((status.rxs_status & 0x200000) == 0) {
691 datalen = status.rxs_status & 0x1FFF;
692 fc = dataptr[PDQ_RX_FC_OFFSET];
693 switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
694 case PDQ_FDDI_LLC_ASYNC:
695 case PDQ_FDDI_LLC_SYNC:
696 case PDQ_FDDI_IMP_ASYNC:
697 case PDQ_FDDI_IMP_SYNC: {
698 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
699 PDQ_PRINTF(("discard: bad length %d\n", datalen));
700 goto discard_frame;
702 break;
704 case PDQ_FDDI_SMT: {
705 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
706 goto discard_frame;
707 break;
709 default: {
710 PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
711 goto discard_frame;
715 * Update the lengths of the data buffers now that we know
716 * the real length.
718 pdulen = datalen - 4 /* CRC */;
719 segcnt = (pdulen + PDQ_RX_FC_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
/* Replace every consumed segment with a freshly allocated buffer;
 * if any allocation fails, free what was chained and discard. */
720 PDQ_OS_DATABUF_ALLOC(npdu);
721 if (npdu == NULL) {
722 PDQ_PRINTF(("discard: no databuf #0\n"));
723 goto discard_frame;
725 buffers[completion] = npdu;
726 for (idx = 1; idx < segcnt; idx++) {
727 PDQ_OS_DATABUF_ALLOC(npdu);
728 if (npdu == NULL) {
729 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
730 PDQ_OS_DATABUF_FREE(fpdu);
731 goto discard_frame;
733 PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
734 lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
735 buffers[(completion + idx) & ring_mask] = npdu;
737 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
/* Slide the replacement buffers into the producer slots. */
738 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
739 buffers[(producer + idx) & ring_mask] =
740 buffers[(completion + idx) & ring_mask];
741 buffers[(completion + idx) & ring_mask] = NULL;
/* Strip the status/FC prefix, fix up the chain lengths, deliver. */
743 PDQ_OS_DATABUF_ADJ(fpdu, PDQ_RX_FC_OFFSET);
744 if (segcnt == 1) {
745 PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
746 } else {
747 PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_RX_FC_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
749 pdq_os_receive_pdu(pdq, fpdu, pdulen);
750 rx->rx_free += PDQ_RX_SEGCNT;
751 PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
752 PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
753 continue;
754 } else {
755 PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
756 status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
757 status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
758 if (status.rxs_rcc_reason == 7)
759 goto discard_frame;
760 if (status.rxs_rcc_reason != 0) {
761 /* hardware fault */
763 if (status.rxs_rcc_badcrc) {
764 kprintf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
765 PDQ_OS_PREFIX_ARGS,
766 dataptr[PDQ_RX_FC_OFFSET+1],
767 dataptr[PDQ_RX_FC_OFFSET+2],
768 dataptr[PDQ_RX_FC_OFFSET+3],
769 dataptr[PDQ_RX_FC_OFFSET+4],
770 dataptr[PDQ_RX_FC_OFFSET+5],
771 dataptr[PDQ_RX_FC_OFFSET+6]);
772 /* rx->rx_badcrc++; */
773 } else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
774 /* rx->rx_frame_status_errors++; */
775 } else {
776 /* hardware fault */
779 discard_frame:
781 * Discarded frames go right back on the queue; therefore
782 * ring entries were freed.
784 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
785 buffers[producer] = buffers[completion];
786 buffers[completion] = NULL;
787 rxd = &receives[rx->rx_producer];
/* Only the first segment of a multi-segment entry carries SOP/seg_cnt. */
788 if (idx == 0) {
789 rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
790 } else {
791 rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
793 rxd->rxd_pa_hi = 0;
794 rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
795 rxd->rxd_pa_lo = PDQ_OS_VA_TO_PA(pdq, PDQ_OS_DATABUF_PTR(buffers[rx->rx_producer]));
796 PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
797 PDQ_ADVANCE(producer, 1, ring_mask);
798 PDQ_ADVANCE(completion, 1, ring_mask);
801 rx->rx_completion = completion;
/* Refill the ring in whole PDQ_RX_SEGCNT groups until rx_target is met. */
803 while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
804 PDQ_OS_DATABUF_T *pdu;
806 * Allocate the needed number of data buffers.
807 * Try to obtain them from our free queue before
808 * asking the system for more.
810 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
811 if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
812 PDQ_OS_DATABUF_ALLOC(pdu);
813 if (pdu == NULL)
814 break;
815 buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
817 rxd = &receives[(rx->rx_producer + idx) & ring_mask];
818 if (idx == 0) {
819 rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
820 } else {
821 rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
823 rxd->rxd_pa_hi = 0;
824 rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
825 rxd->rxd_pa_lo = PDQ_OS_VA_TO_PA(pdq, PDQ_OS_DATABUF_PTR(pdu));
827 if (idx < PDQ_RX_SEGCNT) {
829 * We didn't get all databufs required to complete a new
830 * receive buffer. Keep the ones we got and retry a bit
831 * later for the rest.
833 break;
835 PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
836 rx->rx_free -= PDQ_RX_SEGCNT;
/*
 * Map an outgoing PDU chain onto transmit descriptors and produce it to
 * the adapter.  Emits the pre-built header descriptor first, then one
 * descriptor per page-bounded fragment of each data buffer.  Returns
 * PDQ_FALSE (without consuming the PDU) if the ring lacks space, so the
 * caller can retry later; PDQ_TRUE once the PDU is queued.
 */
840 pdq_boolean_t
841 pdq_queue_transmit_data(struct ifnet *ifp, pdq_t *pdq, PDQ_OS_DATABUF_T *pdu)
843 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
844 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
845 pdq_uint32_t producer = tx->tx_producer;
846 pdq_txdesc_t *eop = NULL;
847 PDQ_OS_DATABUF_T *pdu0;
848 pdq_uint32_t freecnt;
850 if (tx->tx_free < 1)
851 return PDQ_FALSE;
/* First descriptor is the pre-initialized frame header. */
853 dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
854 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
856 for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
857 pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
858 const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);
861 * The first segment is limited to the space remaining in
862 * page. All segments after that can be up to a full page
863 * in size.
/* NOTE(review): (dataptr - (pdq_uint8_t *) NULL) is a vintage idiom
 * for the pointer's numeric value (page offset here); technically UB
 * by the C standard but conventional in drivers of this era. */
865 fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
866 while (datalen > 0 && freecnt > 0) {
867 pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);
870 * Initialize the transmit descriptor
872 eop = &dbp->pdqdb_transmits[producer];
873 eop->txd_seg_len = seglen;
874 eop->txd_pa_lo = PDQ_OS_VA_TO_PA(pdq, dataptr);
875 eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
877 datalen -= seglen;
878 dataptr += seglen;
879 fraglen = PDQ_OS_PAGESIZE;
880 freecnt--;
881 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
883 pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
885 if (pdu0 != NULL) {
886 PDQ_ASSERT(freecnt == 0);
888 * If we still have data to process then the ring was too full
889 * to store the PDU. Return FALSE so the caller will requeue
890 * the PDU for later.
892 return PDQ_FALSE;
895 * Everything went fine. Finish it up.
/* Commit: remove from the interface queue, mark EOP, record state. */
897 ifq_dequeue(&ifp->if_snd, pdu);
898 tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
899 eop->txd_eop = 1;
900 PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
901 tx->tx_producer = producer;
902 tx->tx_free = freecnt;
903 PDQ_DO_TYPE2_PRODUCER(pdq);
904 return PDQ_TRUE;
/*
 * Reclaim transmit descriptors the adapter has consumed: for each
 * completed PDU, dequeue it from the in-flight queue, notify the OS via
 * pdq_os_transmit_done(), and return its descriptors to the free pool.
 * Restarts the transmitter if anything completed.
 */
907 static void
908 pdq_process_transmitted_data(
909 pdq_t *pdq)
911 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
912 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
913 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
914 pdq_uint32_t completion = tx->tx_completion;
916 while (completion != cbp->pdqcb_transmits) {
917 PDQ_OS_DATABUF_T *pdu;
/* tx_descriptor_count was recorded at queue time, per starting slot. */
918 pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
919 PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
920 PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
921 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
922 pdq_os_transmit_done(pdq, pdu);
923 tx->tx_free += descriptor_count;
925 PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
927 if (tx->tx_completion != completion) {
928 tx->tx_completion = completion;
929 pdq_os_restart_transmitter(pdq);
931 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * Drop every PDU still queued for transmit and reset the transmit ring
 * to empty.  Buffers are freed directly (not via pdq_os_transmit_done)
 * because they never reached the wire.
 */
934 void
935 pdq_flush_transmitter(
936 pdq_t *pdq)
938 volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
939 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
941 for (;;) {
942 PDQ_OS_DATABUF_T *pdu;
943 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
944 if (pdu == NULL)
945 break;
947 * Don't call transmit done since the packet never made it
948 * out on the wire.
950 PDQ_OS_DATABUF_FREE(pdu);
/* Empty ring: completion == consumer == producer, full free count. */
953 tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
954 tx->tx_completion = cbp->pdqcb_transmits = tx->tx_producer;
956 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * Hard-reset the adapter and wait (up to ~45 seconds, polled at 1 ms)
 * for it to reach the DMA_UNAVAILABLE state.  No-op if it is already
 * there.  Self-test is skipped unless the adapter is halted.
 */
959 void
960 pdq_hwreset(
961 pdq_t *pdq)
963 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
964 pdq_state_t state;
965 int cnt;
967 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
968 if (state == PDQS_DMA_UNAVAILABLE)
969 return;
970 PDQ_CSR_WRITE(csrs, csr_port_data_a,
971 (state == PDQS_HALTED) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
/* Pulse the reset line: assert, hold 100 us, deassert. */
972 PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
973 PDQ_OS_USEC_DELAY(100);
974 PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
975 for (cnt = 45000;;cnt--) {
976 PDQ_OS_USEC_DELAY(1000);
977 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
978 if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
979 break;
981 PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 45000 - cnt));
982 PDQ_OS_USEC_DELAY(10000);
983 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
984 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
985 PDQ_ASSERT(cnt > 0);
989 * The following routine brings the PDQ from whatever state it is
990 * in to DMA_UNAVAILABLE (ie. like a RESET but without doing a RESET).
992 pdq_state_t
993 pdq_stop(
994 pdq_t *pdq)
996 pdq_state_t state;
997 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
998 int cnt, pass = 0, idx;
999 PDQ_OS_DATABUF_T **buffers;
1001 restart:
1002 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1003 if (state != PDQS_DMA_UNAVAILABLE) {
1004 pdq_hwreset(pdq);
1005 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1006 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1008 #if 0
1009 switch (state) {
1010 case PDQS_RING_MEMBER:
1011 case PDQS_LINK_UNAVAILABLE:
1012 case PDQS_LINK_AVAILABLE: {
1013 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
1014 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1015 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1016 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1017 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1018 /* FALL THROUGH */
1020 case PDQS_DMA_AVAILABLE: {
1021 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
1022 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1023 pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
1024 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1025 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1026 /* FALL THROUGH */
1028 case PDQS_DMA_UNAVAILABLE: {
1029 break;
1032 #endif
1034 * Now we should be in DMA_UNAVAILABLE. So bring the PDQ into
1035 * DMA_AVAILABLE.
1039 * Obtain the hardware address and firmware revisions
1040 * (MLA = my long address which is FDDI speak for hardware address)
1042 pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
1043 pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
1044 pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);
1046 if (pdq->pdq_type == PDQ_DEFPA) {
1048 * Disable interrupts and DMA.
1050 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
1051 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
1055 * Flush all the databuf queues.
1057 pdq_flush_databuf_queue(&pdq->pdq_tx_info.tx_txq);
1058 pdq->pdq_flags &= ~PDQ_TXOK;
1059 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
1060 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
1061 if (buffers[idx] != NULL) {
1062 PDQ_OS_DATABUF_FREE(buffers[idx]);
1063 buffers[idx] = NULL;
1066 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
1067 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
1068 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
1069 if (buffers[idx] != NULL) {
1070 PDQ_OS_DATABUF_FREE(buffers[idx]);
1071 buffers[idx] = NULL;
1074 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1077 * Reset the consumer indexes to 0.
1079 pdq->pdq_cbp->pdqcb_receives = 0;
1080 pdq->pdq_cbp->pdqcb_transmits = 0;
1081 pdq->pdq_cbp->pdqcb_host_smt = 0;
1082 pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
1083 pdq->pdq_cbp->pdqcb_command_response = 0;
1084 pdq->pdq_cbp->pdqcb_command_request = 0;
1087 * Reset the producer and completion indexes to 0.
1089 pdq->pdq_command_info.ci_request_producer = 0;
1090 pdq->pdq_command_info.ci_response_producer = 0;
1091 pdq->pdq_command_info.ci_request_completion = 0;
1092 pdq->pdq_command_info.ci_response_completion = 0;
1093 pdq->pdq_unsolicited_info.ui_producer = 0;
1094 pdq->pdq_unsolicited_info.ui_completion = 0;
1095 pdq->pdq_rx_info.rx_producer = 0;
1096 pdq->pdq_rx_info.rx_completion = 0;
1097 pdq->pdq_tx_info.tx_producer = 0;
1098 pdq->pdq_tx_info.tx_completion = 0;
1099 pdq->pdq_host_smt_info.rx_producer = 0;
1100 pdq->pdq_host_smt_info.rx_completion = 0;
1102 pdq->pdq_command_info.ci_command_active = 0;
1103 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1104 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1107 * Allow the DEFPA to do DMA. Then program the physical
1108 * addresses of the consumer and descriptor blocks.
1110 if (pdq->pdq_type == PDQ_DEFPA) {
1111 #ifdef PDQTEST
1112 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1113 PDQ_PFI_MODE_DMA_ENABLE);
1114 #else
1115 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1116 PDQ_PFI_MODE_DMA_ENABLE
1117 /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
1118 #endif
1122 * Make sure the unsolicited queue has events ...
1124 pdq_process_unsolicited_events(pdq);
1126 if (pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
1127 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
1128 else
1129 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
1130 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
1131 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1133 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1134 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_OS_VA_TO_PA(pdq, pdq->pdq_cbp));
1135 pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);
1137 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1138 PDQ_CSR_WRITE(csrs, csr_port_data_a,
1139 PDQ_OS_VA_TO_PA(pdq, pdq->pdq_dbp) | PDQ_DMA_INIT_LW_BSWAP_DATA);
1140 pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);
1142 for (cnt = 0; cnt < 1000; cnt++) {
1143 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1144 if (state == PDQS_HALTED) {
1145 if (pass > 0)
1146 return PDQS_HALTED;
1147 pass = 1;
1148 goto restart;
1150 if (state == PDQS_DMA_AVAILABLE) {
1151 PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
1152 break;
1154 PDQ_OS_USEC_DELAY(1000);
1156 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1158 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1159 PDQ_CSR_WRITE(csrs, csr_host_int_enable, 0) /* PDQ_HOST_INT_STATE_CHANGE
1160 |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
1161 |PDQ_HOST_INT_UNSOL_ENABLE */;
1164 * Any other command but START should be valid.
1166 pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
1167 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1168 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1169 pdq_queue_commands(pdq);
1171 if (pdq->pdq_flags & PDQ_PRINTCHARS) {
1173 * Now wait (up to 100ms) for the command(s) to finish.
1175 for (cnt = 0; cnt < 1000; cnt++) {
1176 pdq_process_command_responses(pdq);
1177 if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
1178 break;
1179 PDQ_OS_USEC_DELAY(1000);
1181 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1184 return state;
1187 void
1188 pdq_run(
1189 pdq_t *pdq)
1191 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1192 pdq_state_t state;
1194 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1195 PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
1196 PDQ_ASSERT(state != PDQS_RESET);
1197 PDQ_ASSERT(state != PDQS_HALTED);
1198 PDQ_ASSERT(state != PDQS_UPGRADE);
1199 PDQ_ASSERT(state != PDQS_RING_MEMBER);
1200 switch (state) {
1201 case PDQS_DMA_AVAILABLE: {
1203 * The PDQ after being reset screws up some of its state.
1204 * So we need to clear all the errors/interrupts so the real
1205 * ones will get through.
1207 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1208 PDQ_CSR_WRITE(csrs, csr_host_int_enable, PDQ_HOST_INT_STATE_CHANGE|PDQ_HOST_INT_XMT_DATA_FLUSH
1209 |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
1210 |PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_TX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE);
1212 * Set the MAC and address filters and start up the PDQ.
1214 pdq_process_unsolicited_events(pdq);
1215 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1216 pdq->pdq_dbp->pdqdb_receives,
1217 pdq->pdq_cbp->pdqcb_receives,
1218 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1219 PDQ_DO_TYPE2_PRODUCER(pdq);
1220 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1221 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1222 pdq->pdq_dbp->pdqdb_host_smt,
1223 pdq->pdq_cbp->pdqcb_host_smt,
1224 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1225 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1226 pdq->pdq_host_smt_info.rx_producer
1227 | (pdq->pdq_host_smt_info.rx_completion << 8));
1229 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1230 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET) | PDQ_BITMASK(PDQC_START);
1231 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1232 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1233 pdq_queue_commands(pdq);
1234 break;
1236 case PDQS_LINK_UNAVAILABLE:
1237 case PDQS_LINK_AVAILABLE: {
1238 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1239 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET);
1240 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1241 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1242 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1243 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1244 pdq->pdq_dbp->pdqdb_host_smt,
1245 pdq->pdq_cbp->pdqcb_host_smt,
1246 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1247 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1248 pdq->pdq_host_smt_info.rx_producer
1249 | (pdq->pdq_host_smt_info.rx_completion << 8));
1251 pdq_process_unsolicited_events(pdq);
1252 pdq_queue_commands(pdq);
1253 break;
1255 case PDQS_RING_MEMBER: {
1257 default: { /* to make gcc happy */
1258 break;
1264 pdq_interrupt(
1265 pdq_t *pdq)
1267 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1268 pdq_uint32_t data;
1269 int progress = 0;
1271 if (pdq->pdq_type == PDQ_DEFPA)
1272 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1274 while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
1275 progress = 1;
1276 PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
1277 if (data & PDQ_PSTS_RCV_DATA_PENDING) {
1278 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1279 pdq->pdq_dbp->pdqdb_receives,
1280 pdq->pdq_cbp->pdqcb_receives,
1281 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1282 PDQ_DO_TYPE2_PRODUCER(pdq);
1284 if (data & PDQ_PSTS_HOST_SMT_PENDING) {
1285 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1286 pdq->pdq_dbp->pdqdb_host_smt,
1287 pdq->pdq_cbp->pdqcb_host_smt,
1288 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1289 PDQ_DO_HOST_SMT_PRODUCER(pdq);
1291 if (data & PDQ_PSTS_XMT_DATA_PENDING)
1292 pdq_process_transmitted_data(pdq);
1293 if (data & PDQ_PSTS_UNSOL_PENDING)
1294 pdq_process_unsolicited_events(pdq);
1295 if (data & PDQ_PSTS_CMD_RSP_PENDING)
1296 pdq_process_command_responses(pdq);
1297 if (data & PDQ_PSTS_TYPE_0_PENDING) {
1298 data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
1299 if (data & PDQ_HOST_INT_STATE_CHANGE) {
1300 pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1301 kprintf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
1302 if (state == PDQS_LINK_UNAVAILABLE) {
1303 pdq->pdq_flags &= ~PDQ_TXOK;
1304 } else if (state == PDQS_LINK_AVAILABLE) {
1305 pdq->pdq_flags |= PDQ_TXOK;
1306 pdq_os_restart_transmitter(pdq);
1307 } else if (state == PDQS_HALTED) {
1308 pdq_response_error_log_get_t log_entry;
1309 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
1310 kprintf(": halt code = %d (%s)\n",
1311 halt_code, pdq_halt_codes[halt_code]);
1312 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
1313 PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1314 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1315 data & PDQ_HOST_INT_FATAL_ERROR));
1317 pdq_read_error_log(pdq, &log_entry);
1318 pdq_stop(pdq);
1319 if (pdq->pdq_flags & PDQ_RUNNING)
1320 pdq_run(pdq);
1321 return 1;
1323 kprintf("\n");
1324 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
1326 if (data & PDQ_HOST_INT_FATAL_ERROR) {
1327 pdq_stop(pdq);
1328 if (pdq->pdq_flags & PDQ_RUNNING)
1329 pdq_run(pdq);
1330 return 1;
1332 if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
1333 kprintf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
1334 pdq->pdq_flags &= ~PDQ_TXOK;
1335 pdq_flush_transmitter(pdq);
1336 pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
1337 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
1340 if (pdq->pdq_type == PDQ_DEFPA)
1341 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1343 return progress;
1346 pdq_t *
1347 pdq_initialize(
1348 pdq_bus_t bus,
1349 pdq_bus_memaddr_t csr_base,
1350 const char *name,
1351 int unit,
1352 void *ctx,
1353 pdq_type_t type)
1355 pdq_t *pdq;
1356 pdq_state_t state;
1357 const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
1358 pdq_uint8_t *p;
1359 int idx;
1361 PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
1362 PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
1363 PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
1364 PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
1365 PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
1366 PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
1367 PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
1368 PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
1369 PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);
1371 pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
1372 if (pdq == NULL) {
1373 PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
1374 return NULL;
1376 PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
1377 pdq->pdq_type = type;
1378 pdq->pdq_unit = unit;
1379 pdq->pdq_os_ctx = (void *) ctx;
1380 pdq->pdq_os_name = name;
1381 pdq->pdq_flags = PDQ_PRINTCHARS;
1383 * Allocate the additional data structures required by
1384 * the PDQ driver. Allocate a contiguous region of memory
1385 * for the descriptor block. We need to allocated enough
1386 * to guarantee that we will a get 8KB block of memory aligned
1387 * on a 8KB boundary. This turns to require that we allocate
1388 * (N*2 - 1 page) pages of memory. On machine with less than
1389 * a 8KB page size, it mean we will allocate more memory than
1390 * we need. The extra will be used for the unsolicited event
1391 * buffers (though on machines with 8KB pages we will to allocate
1392 * them separately since there will be nothing left overs.)
1394 p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
1395 if (p != NULL) {
1396 pdq_physaddr_t physaddr = PDQ_OS_VA_TO_PA(pdq, p);
1398 * Assert that we really got contiguous memory. This isn't really
1399 * needed on systems that actually have physical contiguous allocation
1400 * routines, but on those systems that don't ...
1402 for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
1403 if (PDQ_OS_VA_TO_PA(pdq, p + idx) - physaddr != idx)
1404 goto cleanup_and_return;
1406 physaddr &= 0x1FFF;
1407 if (physaddr) {
1408 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
1409 pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - physaddr];
1410 } else {
1411 pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
1412 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
1415 if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
1416 pdq->pdq_unsolicited_info.ui_events =
1417 (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
1418 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1422 * Make sure everything got allocated. If not, free what did
1423 * get allocated and return.
1425 if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
1426 cleanup_and_return:
1427 if (p /* pdq->pdq_dbp */ != NULL)
1428 PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
1429 if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
1430 PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
1431 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1432 PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
1433 return NULL;
1436 pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
1437 pdq->pdq_command_info.ci_bufstart = (pdq_uint8_t *) pdq->pdq_dbp->pdqdb_command_pool;
1438 pdq->pdq_rx_info.rx_buffers = (void *) pdq->pdq_dbp->pdqdb_receive_buffers;
1440 pdq->pdq_host_smt_info.rx_buffers = (void *) pdq->pdq_dbp->pdqdb_host_smt_buffers;
1442 PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp));
1443 PDQ_PRINTF((" Receive Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_receives));
1444 PDQ_PRINTF((" Transmit Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_transmits));
1445 PDQ_PRINTF((" Host SMT Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_host_smt));
1446 PDQ_PRINTF((" Command Response Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_command_responses));
1447 PDQ_PRINTF((" Command Request Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_command_requests));
1448 PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));
1451 * Zero out the descriptor block. Not really required but
1452 * it pays to be neat. This will also zero out the consumer
1453 * block, command pool, and buffer pointers for the receive
1454 * host_smt rings.
1456 PDQ_OS_MEMZERO(pdq->pdq_dbp, sizeof(*pdq->pdq_dbp));
1459 * Initialize the CSR references.
1460 * the DEFAA (FutureBus+) skips a longword between registers
1462 pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
1463 if (pdq->pdq_type == PDQ_DEFPA)
1464 pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);
1466 PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_PTR_FMT "\n", pdq->pdq_csrs.csr_base));
1467 PDQ_PRINTF((" Port Reset = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1468 pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
1469 PDQ_PRINTF((" Host Data = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1470 pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
1471 PDQ_PRINTF((" Port Control = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1472 pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
1473 PDQ_PRINTF((" Port Data A = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1474 pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
1475 PDQ_PRINTF((" Port Data B = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1476 pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
1477 PDQ_PRINTF((" Port Status = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1478 pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
1479 PDQ_PRINTF((" Host Int Type 0 = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1480 pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
1481 PDQ_PRINTF((" Host Int Enable = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1482 pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
1483 PDQ_PRINTF((" Type 2 Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1484 pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
1485 PDQ_PRINTF((" Command Response Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1486 pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
1487 PDQ_PRINTF((" Command Request Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1488 pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
1489 PDQ_PRINTF((" Host SMT Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1490 pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
1491 PDQ_PRINTF((" Unsolicited Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1492 pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));
1495 * Initialize the command information block
1497 pdq->pdq_command_info.ci_pa_bufstart = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_command_info.ci_bufstart);
1498 for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_command_requests)/sizeof(pdq->pdq_dbp->pdqdb_command_requests[0]); idx++) {
1499 pdq_txdesc_t *txd = &pdq->pdq_dbp->pdqdb_command_requests[idx];
1501 txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_bufstart;
1502 txd->txd_eop = txd->txd_sop = 1;
1503 txd->txd_pa_hi = 0;
1505 for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_command_responses)/sizeof(pdq->pdq_dbp->pdqdb_command_responses[0]); idx++) {
1506 pdq_rxdesc_t *rxd = &pdq->pdq_dbp->pdqdb_command_responses[idx];
1508 rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_bufstart;
1509 rxd->rxd_sop = 1;
1510 rxd->rxd_seg_cnt = 0;
1511 rxd->rxd_seg_len_lo = 0;
1515 * Initialize the unsolicited event information block
1517 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1518 pdq->pdq_unsolicited_info.ui_pa_bufstart = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_unsolicited_info.ui_events);
1519 for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_unsolicited_events)/sizeof(pdq->pdq_dbp->pdqdb_unsolicited_events[0]); idx++) {
1520 pdq_rxdesc_t *rxd = &pdq->pdq_dbp->pdqdb_unsolicited_events[idx];
1521 pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
1523 rxd->rxd_sop = 1;
1524 rxd->rxd_seg_cnt = 0;
1525 rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16;
1526 rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
1527 - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events;
1528 rxd->rxd_pa_hi = 0;
1531 * Initialize the receive information blocks (normal and SMT).
1533 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
1534 pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;
1536 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1537 pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;
1540 * Initialize the transmit information block.
1542 pdq->pdq_tx_hdr[0] = PDQ_FDDI_PH0;
1543 pdq->pdq_tx_hdr[1] = PDQ_FDDI_PH1;
1544 pdq->pdq_tx_hdr[2] = PDQ_FDDI_PH2;
1545 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1546 pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = sizeof(pdq->pdq_tx_hdr);
1547 pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1;
1548 pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_tx_hdr);
1550 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1551 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1554 * Stop the PDQ if it is running and put it into a known state.
1556 state = pdq_stop(pdq);
1558 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1559 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1561 * If the adapter is not the state we expect, then the initialization
1562 * failed. Cleanup and exit.
1564 #if defined(PDQVERBOSE)
1565 if (state == PDQS_HALTED) {
1566 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1567 kprintf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
1568 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
1569 PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1570 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1571 PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
1573 #endif
1574 if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
1575 goto cleanup_and_return;
1577 PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
1578 pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
1579 pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
1580 pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
1581 PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
1582 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
1583 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
1584 PDQ_PRINTF(("PDQ Chip Revision = "));
1585 switch (pdq->pdq_chip_rev) {
1586 case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
1587 case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
1588 case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
1589 default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));
1591 PDQ_PRINTF(("\n"));
1593 return pdq;