/*
 *  smctr.c: A network driver for the SMC Token Ring Adapters.
 *
 *  Written by Jay Schulist <jschlst@turbolinux.com>
 *
 *  This software may be used and distributed according to the terms
 *  of the GNU Public License, incorporated herein by reference.
 *
 *  This device driver works with the following SMC adapters:
 *      - SMC TokenCard Elite       (8115T)
 *      - SMC TokenCard Elite/A MCA (8115T/A)
 *
 *  Source(s):
 *      - SMC TokenCard SDK.
 *
 *  Maintainer(s):
 *      JS      Jay Schulist <jschlst@turbolinux.com>
 *
 *  To do:
 *      1. Multicast support.
 */

static const char *version = "smctr.c: v1.1 1/1/00 by jschlst@turbolinux.com\n";
static const char *cardname = "smctr";
#ifdef MODULE
#include <linux/module.h>
#include <linux/version.h>
#endif

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/time.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mca.h>
#include <linux/delay.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/trdevice.h>

#include "smctr.h"              /* Our Stuff */
#include "smctr_firmware.h"     /* SMC adapter firmware */
#define SMCTR_IO_EXTENT 20

/* A zero-terminated list of I/O addresses to be probed. */
static unsigned int smctr_portlist[] __initdata = {
        0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300,
        0x320, 0x340, 0x360, 0x380,
        0
};

#ifdef CONFIG_MCA
static unsigned int smctr_posid = 0x6ec6;
#endif

static int ringspeed = 0;

/* SMC Name of the Adapter. */
static char *smctr_name = "SMC TokenCard";
char *smctr_model = "Unknown";

/* Use 0 for production, 1 for verification, 2 for debug, and
 * 3 for very verbose debug.
 */
#ifndef SMCTR_DEBUG
#define SMCTR_DEBUG 1
#endif
static unsigned int smctr_debug = SMCTR_DEBUG;
/* smctr.c prototypes and functions are arranged alphabetically
 * for clarity, maintainability and pure old-fashioned fun.
 */
92 /* A */
93 static int smctr_alloc_shared_memory(struct net_device *dev);
95 /* B */
96 static int smctr_bypass_state(struct net_device *dev);
98 /* C */
99 static int smctr_checksum_firmware(struct net_device *dev);
100 static int __init smctr_chk_isa(struct net_device *dev);
101 static int smctr_chg_rx_mask(struct net_device *dev);
102 static int smctr_clear_int(struct net_device *dev);
103 static int smctr_clear_trc_reset(int ioaddr);
104 static int smctr_close(struct net_device *dev);
106 /* D */
107 static int smctr_decode_firmware(struct net_device *dev);
108 static int smctr_disable_16bit(struct net_device *dev);
109 static int smctr_disable_adapter_ctrl_store(struct net_device *dev);
110 static int smctr_disable_adapter_ram(struct net_device *dev);
111 static int smctr_disable_bic_int(struct net_device *dev);
113 /* E */
114 static int smctr_enable_16bit(struct net_device *dev);
115 static int smctr_enable_adapter_ctrl_store(struct net_device *dev);
116 static int smctr_enable_adapter_ram(struct net_device *dev);
117 static int smctr_enable_bic_int(struct net_device *dev);
119 /* G */
120 static int __init smctr_get_boardid(struct net_device *dev, int mca);
121 static int smctr_get_group_address(struct net_device *dev);
122 static int smctr_get_functional_address(struct net_device *dev);
123 static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev);
124 static int smctr_get_physical_drop_number(struct net_device *dev);
125 static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue);
126 static int smctr_get_station_id(struct net_device *dev);
127 static struct net_device_stats *smctr_get_stats(struct net_device *dev);
128 static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
129 __u16 bytes_count);
130 static int smctr_get_upstream_neighbor_addr(struct net_device *dev);
132 /* H */
133 static int smctr_hardware_send_packet(struct net_device *dev,
134 struct net_local *tp);
135 /* I */
136 static int smctr_init_acbs(struct net_device *dev);
137 static int smctr_init_adapter(struct net_device *dev);
138 static int __init smctr_init_card(struct net_device *dev);
139 static int smctr_init_card_real(struct net_device *dev);
140 static int smctr_init_rx_bdbs(struct net_device *dev);
141 static int smctr_init_rx_fcbs(struct net_device *dev);
142 static int smctr_init_shared_memory(struct net_device *dev);
143 static int smctr_init_tx_bdbs(struct net_device *dev);
144 static int smctr_init_tx_fcbs(struct net_device *dev);
145 static int smctr_internal_self_test(struct net_device *dev);
146 static void smctr_interrupt(int irq, void *dev_id, struct pt_regs *regs);
147 static int smctr_issue_enable_int_cmd(struct net_device *dev,
148 __u16 interrupt_enable_mask);
149 static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code,
150 __u16 ibits);
151 static int smctr_issue_init_timers_cmd(struct net_device *dev);
152 static int smctr_issue_init_txrx_cmd(struct net_device *dev);
153 static int smctr_issue_insert_cmd(struct net_device *dev);
154 static int smctr_issue_read_ring_status_cmd(struct net_device *dev);
155 static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt);
156 static int smctr_issue_remove_cmd(struct net_device *dev);
157 static int smctr_issue_resume_acb_cmd(struct net_device *dev);
158 static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue);
159 static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue);
160 static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue);
161 static int smctr_issue_test_internal_rom_cmd(struct net_device *dev);
162 static int smctr_issue_test_hic_cmd(struct net_device *dev);
163 static int smctr_issue_test_mac_reg_cmd(struct net_device *dev);
164 static int smctr_issue_trc_loopback_cmd(struct net_device *dev);
165 static int smctr_issue_tri_loopback_cmd(struct net_device *dev);
166 static int smctr_issue_write_byte_cmd(struct net_device *dev,
167 short aword_cnt, void *byte);
168 static int smctr_issue_write_word_cmd(struct net_device *dev,
169 short aword_cnt, void *word);
171 /* J */
172 static int smctr_join_complete_state(struct net_device *dev);
174 /* L */
175 static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev);
176 static int smctr_load_firmware(struct net_device *dev);
177 static int smctr_load_node_addr(struct net_device *dev);
178 static int smctr_lobe_media_test(struct net_device *dev);
179 static int smctr_lobe_media_test_cmd(struct net_device *dev);
180 static int smctr_lobe_media_test_state(struct net_device *dev);
182 /* M */
183 static int smctr_make_8025_hdr(struct net_device *dev,
184 MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc);
185 static int smctr_make_access_pri(struct net_device *dev,
186 MAC_SUB_VECTOR *tsv);
187 static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv);
188 static int smctr_make_auth_funct_class(struct net_device *dev,
189 MAC_SUB_VECTOR *tsv);
190 static int smctr_make_corr(struct net_device *dev,
191 MAC_SUB_VECTOR *tsv, __u16 correlator);
192 static int smctr_make_funct_addr(struct net_device *dev,
193 MAC_SUB_VECTOR *tsv);
194 static int smctr_make_group_addr(struct net_device *dev,
195 MAC_SUB_VECTOR *tsv);
196 static int smctr_make_phy_drop_num(struct net_device *dev,
197 MAC_SUB_VECTOR *tsv);
198 static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
199 static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
200 static int smctr_make_ring_station_status(struct net_device *dev,
201 MAC_SUB_VECTOR *tsv);
202 static int smctr_make_ring_station_version(struct net_device *dev,
203 MAC_SUB_VECTOR *tsv);
204 static int smctr_make_tx_status_code(struct net_device *dev,
205 MAC_SUB_VECTOR *tsv, __u16 tx_fstatus);
206 static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
207 MAC_SUB_VECTOR *tsv);
208 static int smctr_make_wrap_data(struct net_device *dev,
209 MAC_SUB_VECTOR *tsv);
211 /* O */
212 static int smctr_open(struct net_device *dev);
213 static int smctr_open_tr(struct net_device *dev);
215 /* P */
216 int __init smctr_probe (struct net_device *dev);
217 static int __init smctr_probe1(struct net_device *dev, int ioaddr);
218 static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
219 struct net_device *dev, __u16 rx_status);
221 /* R */
222 static int smctr_ram_conflict_test(struct net_device *dev);
223 static int smctr_ram_memory_test(struct net_device *dev);
224 static unsigned int __init smctr_read_584_chksum(int ioaddr);
225 static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
226 __u16 *correlator);
227 static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
228 __u16 *correlator);
229 static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf);
230 static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
231 MAC_HEADER *rmf, __u16 *correlator);
232 static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
233 __u16 *correlator);
234 static int smctr_reset_adapter(struct net_device *dev);
235 static int smctr_restart_tx_chain(struct net_device *dev, short queue);
236 static int smctr_ring_status_chg(struct net_device *dev);
237 static int smctr_rom_conflict_test(struct net_device *dev);
238 static int smctr_rx_frame(struct net_device *dev);
240 /* S */
241 static int smctr_send_dat(struct net_device *dev);
242 static int smctr_send_packet(struct sk_buff *skb, struct net_device *dev);
243 static int smctr_send_lobe_media_test(struct net_device *dev);
244 static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
245 __u16 correlator);
246 static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
247 __u16 correlator);
248 static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
249 __u16 correlator);
250 static int smctr_send_rpt_tx_forward(struct net_device *dev,
251 MAC_HEADER *rmf, __u16 tx_fstatus);
252 static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
253 __u16 rcode, __u16 correlator);
254 static int smctr_send_rq_init(struct net_device *dev);
255 static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
256 __u16 *tx_fstatus);
257 static int smctr_set_auth_access_pri(struct net_device *dev,
258 MAC_SUB_VECTOR *rsv);
259 static int smctr_set_auth_funct_class(struct net_device *dev,
260 MAC_SUB_VECTOR *rsv);
261 static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
262 __u16 *correlator);
263 static int smctr_set_error_timer_value(struct net_device *dev,
264 MAC_SUB_VECTOR *rsv);
265 static int smctr_set_frame_forward(struct net_device *dev,
266 MAC_SUB_VECTOR *rsv, __u8 dc_sc);
267 static int smctr_set_local_ring_num(struct net_device *dev,
268 MAC_SUB_VECTOR *rsv);
269 static unsigned short smctr_set_ctrl_attention(struct net_device *dev);
270 static void smctr_set_multicast_list(struct net_device *dev);
271 static int smctr_set_page(struct net_device *dev, __u8 *buf);
272 static int smctr_set_phy_drop(struct net_device *dev,
273 MAC_SUB_VECTOR *rsv);
274 static int smctr_set_ring_speed(struct net_device *dev);
275 static int smctr_set_rx_look_ahead(struct net_device *dev);
276 static int smctr_set_trc_reset(int ioaddr);
277 static int smctr_setup_single_cmd(struct net_device *dev,
278 __u16 command, __u16 subcommand);
279 static int smctr_setup_single_cmd_w_data(struct net_device *dev,
280 __u16 command, __u16 subcommand);
281 static char *smctr_malloc(struct net_device *dev, __u16 size);
282 static int smctr_status_chg(struct net_device *dev);
284 /* T */
285 static void smctr_timeout(struct net_device *dev);
286 static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
287 __u16 queue);
288 static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue);
289 static unsigned short smctr_tx_move_frame(struct net_device *dev,
290 struct sk_buff *skb, __u8 *pbuff, unsigned int bytes);
292 /* U */
293 static int smctr_update_err_stats(struct net_device *dev);
294 static int smctr_update_rx_chain(struct net_device *dev, __u16 queue);
295 static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
296 __u16 queue);
298 /* W */
299 static int smctr_wait_cmd(struct net_device *dev);
300 static int smctr_wait_while_cbusy(struct net_device *dev);
#define TO_256_BYTE_BOUNDRY(X)  (((X + 0xff) & 0xff00) - X)
#define TO_PARAGRAPH_BOUNDRY(X) (((X + 0x0f) & 0xfff0) - X)
#define PARAGRAPH_BOUNDRY(X)    smctr_malloc(dev, TO_PARAGRAPH_BOUNDRY(X))
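/* These macros return the pad needed to reach the next alignment boundary
 * from offset X (the masks assume a 16-bit offset). Worked example, X = 0x1234:
 *   TO_256_BYTE_BOUNDRY(0x1234)  = (0x1300 - 0x1234) = 0xCC
 *   TO_PARAGRAPH_BOUNDRY(0x1234) = (0x1240 - 0x1234) = 0x0C
 * so 0x1234 + 0xCC lands on a 256-byte boundary and 0x1234 + 0x0C on a
 * 16-byte paragraph boundary. PARAGRAPH_BOUNDRY(X) consumes that pad via
 * smctr_malloc() so the next allocation starts paragraph-aligned.
 */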
/* Allocate Adapter Shared Memory.
 * IMPORTANT NOTE: Any changes to this function MUST be mirrored in the
 * function "get_num_rx_bdbs" below!!!
 *
 * Order of memory allocation:
 *
 *       0. Initial System Configuration Block Pointer
 *       1. System Configuration Block
 *       2. System Control Block
 *       3. Action Command Block
 *       4. Interrupt Status Block
 *
 *       5. MAC TX FCB'S
 *       6. NON-MAC TX FCB'S
 *       7. MAC TX BDB'S
 *       8. NON-MAC TX BDB'S
 *       9. MAC RX FCB'S
 *      10. NON-MAC RX FCB'S
 *      11. MAC RX BDB'S
 *      12. NON-MAC RX BDB'S
 *      13. MAC TX Data Buffer (1, 256 byte buffer)
 *      14. MAC RX Data Buffer (1, 256 byte buffer)
 *
 *      15. NON-MAC TX Data Buffer
 *      16. NON-MAC RX Data Buffer
 */
static int smctr_alloc_shared_memory(struct net_device *dev)
{
        struct net_local *tp = (struct net_local *)dev->priv;

        if(smctr_debug > 10)
                printk("%s: smctr_alloc_shared_memory\n", dev->name);

        /* Allocate initial System Control Block pointer.
         * This pointer is located in the last page, last offset - 4.
         */
        tp->iscpb_ptr = (ISCPBlock *)(tp->ram_access + ((__u32)64 * 0x400)
                - (long)ISCP_BLOCK_SIZE);
345 /* Allocate System Control Blocks. */
346 tp->scgb_ptr = (SCGBlock *)smctr_malloc(dev, sizeof(SCGBlock));
347 PARAGRAPH_BOUNDRY(tp->sh_mem_used);
349 tp->sclb_ptr = (SCLBlock *)smctr_malloc(dev, sizeof(SCLBlock));
350 PARAGRAPH_BOUNDRY(tp->sh_mem_used);
352 tp->acb_head = (ACBlock *)smctr_malloc(dev,
353 sizeof(ACBlock)*tp->num_acbs);
354 PARAGRAPH_BOUNDRY(tp->sh_mem_used);
356 tp->isb_ptr = (ISBlock *)smctr_malloc(dev, sizeof(ISBlock));
357 PARAGRAPH_BOUNDRY(tp->sh_mem_used);
359 tp->misc_command_data = (__u16 *)smctr_malloc(dev, MISC_DATA_SIZE);
360 PARAGRAPH_BOUNDRY(tp->sh_mem_used);
362 /* Allocate transmit FCBs. */
363 tp->tx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
364 sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]);
366 tp->tx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
367 sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]);
369 tp->tx_fcb_head[BUG_QUEUE] = (FCBlock *)smctr_malloc(dev,
370 sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]);
372 /* Allocate transmit BDBs. */
373 tp->tx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
374 sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]);
376 tp->tx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
377 sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]);
379 tp->tx_bdb_head[BUG_QUEUE] = (BDBlock *)smctr_malloc(dev,
380 sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]);
382 /* Allocate receive FCBs. */
383 tp->rx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
384 sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]);
386 tp->rx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
387 sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]);
389 /* Allocate receive BDBs. */
390 tp->rx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
391 sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]);
393 tp->rx_bdb_end[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);
395 tp->rx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
396 sizeof(BDBlock) * tp->num_rx_bdbs[NON_MAC_QUEUE]);
398 tp->rx_bdb_end[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);
/* Allocate MAC transmit buffers.
 * MAC Tx Buffers don't have to be on an ODD Boundary.
 */
403 tp->tx_buff_head[MAC_QUEUE]
404 = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[MAC_QUEUE]);
405 tp->tx_buff_curr[MAC_QUEUE] = tp->tx_buff_head[MAC_QUEUE];
406 tp->tx_buff_end [MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
408 /* Allocate BUG transmit buffers. */
409 tp->tx_buff_head[BUG_QUEUE]
410 = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[BUG_QUEUE]);
411 tp->tx_buff_curr[BUG_QUEUE] = tp->tx_buff_head[BUG_QUEUE];
412 tp->tx_buff_end[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
/* Allocate MAC receive data buffers.
 * MAC Rx buffers don't have to be on a 256 byte boundary.
 */
417 tp->rx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
418 RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]);
419 tp->rx_buff_end[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
/* Allocate Non-MAC transmit buffers.
 * ?? For maximum Netware performance, put Tx Buffers on
 * an ODD Boundary and then restore malloc to Even Boundaries.
 */
425 smctr_malloc(dev, 1L);
426 tp->tx_buff_head[NON_MAC_QUEUE]
427 = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[NON_MAC_QUEUE]);
428 tp->tx_buff_curr[NON_MAC_QUEUE] = tp->tx_buff_head[NON_MAC_QUEUE];
429 tp->tx_buff_end [NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
430 smctr_malloc(dev, 1L);
/* Allocate Non-MAC receive data buffers.
 * To guarantee a minimum of 256 bytes of contiguous memory to
 * UM_Receive_Packet's lookahead pointer, before a page
 * change or ring end is encountered, place each rx buffer on
 * a 256 byte boundary.
 */
438 smctr_malloc(dev, TO_256_BYTE_BOUNDRY(tp->sh_mem_used));
439 tp->rx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
440 RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]);
441 tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
return (0);
}
446 /* Enter Bypass state. */
447 static int smctr_bypass_state(struct net_device *dev)
449 int err;
451 if(smctr_debug > 10)
452 printk("%s: smctr_bypass_state\n", dev->name);
454 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
455 JS_BYPASS_STATE);
457 return (err);
460 static int smctr_checksum_firmware(struct net_device *dev)
462 struct net_local *tp = (struct net_local *)dev->priv;
463 __u16 i, checksum = 0;
465 if(smctr_debug > 10)
466 printk("%s: smctr_checksum_firmware\n", dev->name);
468 smctr_enable_adapter_ctrl_store(dev);
470 for(i = 0; i < CS_RAM_SIZE; i += 2)
471 checksum += *((__u16 *)(tp->ram_access + i));
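/* The image presumably includes a compensating word so that a valid
 * control store sums to zero over CS_RAM_SIZE; any non-zero total is
 * returned below and is treated by the callers as a checksum failure
 * (microcode not present or corrupt).
 */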
473 tp->microcode_version = *(__u16 *)(tp->ram_access
474 + CS_RAM_VERSION_OFFSET);
475 tp->microcode_version >>= 8;
477 smctr_disable_adapter_ctrl_store(dev);
479 if(checksum)
480 return (checksum);
482 return (0);
485 static int smctr_chk_mca(struct net_device *dev)
487 #ifdef CONFIG_MCA
488 struct net_local *tp = (struct net_local *)dev->priv;
489 int current_slot;
490 __u8 r1, r2, r3, r4, r5;
492 current_slot = mca_find_unused_adapter(smctr_posid, 0);
493 if(current_slot == MCA_NOTFOUND)
494 return (-ENODEV);
496 mca_set_adapter_name(current_slot, smctr_name);
497 mca_mark_as_used(current_slot);
498 tp->slot_num = current_slot;
500 r1 = mca_read_stored_pos(tp->slot_num, 2);
501 r2 = mca_read_stored_pos(tp->slot_num, 3);
503 if(tp->slot_num)
504 outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num - 1) | CNFG_SLOT_ENABLE_BIT));
505 else
506 outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num) | CNFG_SLOT_ENABLE_BIT));
508 r1 = inb(CNFG_POS_REG1);
509 r2 = inb(CNFG_POS_REG0);
511 tp->bic_type = BIC_594_CHIP;
513 /* IO */
514 r2 = mca_read_stored_pos(tp->slot_num, 2);
515 r2 &= 0xF0;
516 dev->base_addr = ((__u16)r2 << 8) + (__u16)0x800;
517 request_region(dev->base_addr, SMCTR_IO_EXTENT, smctr_name);
519 /* IRQ */
520 r5 = mca_read_stored_pos(tp->slot_num, 5);
521 r5 &= 0xC;
522 switch(r5)
524 case 0:
525 dev->irq = 3;
526 break;
528 case 0x4:
529 dev->irq = 4;
530 break;
532 case 0x8:
533 dev->irq = 10;
534 break;
536 default:
537 dev->irq = 15;
538 break;
540 if(request_irq(dev->irq, smctr_interrupt, SA_SHIRQ, smctr_name, dev))
541 return (-ENODEV);
543 /* Get RAM base */
544 r3 = mca_read_stored_pos(tp->slot_num, 3);
545 if(r3 & 0x8)
547 if(r3 & 0x80)
548 tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0xFD0000;
549 else
550 tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0D0000;
552 else
554 if(r3 & 0x80)
555 tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0xFC0000;
556 else
557 tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0C0000;
560 /* Get Ram Size */
561 r3 &= 0x30;
562 r3 >>= 4;
564 tp->ram_usable = (__u16)CNFG_SIZE_8KB << r3;
565 tp->ram_size = (__u16)CNFG_SIZE_64KB;
566 tp->board_id |= TOKEN_MEDIA;
568 r4 = mca_read_stored_pos(tp->slot_num, 4);
569 if(r4 & 0x8)
570 tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0xD0000;
571 else
572 tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0xC0000;
574 /* Get ROM size. */
575 r4 >>= 4;
576 if(r4 == 0)
577 tp->rom_size = CNFG_SIZE_8KB;
578 else
580 if(r4 == 1)
581 tp->rom_size = CNFG_SIZE_16KB;
582 else
584 if(r4 == 2)
585 tp->rom_size = CNFG_SIZE_32KB;
586 else
587 tp->rom_size = ROM_DISABLE;
591 /* Get Media Type. */
592 r5 = mca_read_stored_pos(tp->slot_num, 5);
593 r5 &= CNFG_MEDIA_TYPE_MASK;
594 switch(r5)
596 case (0):
597 tp->media_type = MEDIA_STP_4;
598 break;
600 case (1):
601 tp->media_type = MEDIA_STP_16;
602 break;
604 case (3):
605 tp->media_type = MEDIA_UTP_16;
606 break;
608 default:
609 tp->media_type = MEDIA_UTP_4;
610 break;
612 tp->media_menu = 14;
614 r2 = mca_read_stored_pos(tp->slot_num, 2);
615 if(!(r2 & 0x02))
616 tp->mode_bits |= EARLY_TOKEN_REL;
618 /* Disable slot */
619 outb(CNFG_POS_CONTROL_REG, 0);
621 tp->board_id = smctr_get_boardid(dev, 1);
622 switch(tp->board_id & 0xffff)
624 case WD8115TA:
625 smctr_model = "8115T/A";
626 break;
628 case WD8115T:
629 smctr_model = "8115T";
630 break;
632 default:
633 smctr_model = "Unknown";
634 break;
637 return (0);
638 #else
639 return (-1);
640 #endif /* CONFIG_MCA */
643 static int smctr_chg_rx_mask(struct net_device *dev)
645 struct net_local *tp = (struct net_local *)dev->priv;
646 int err = 0;
648 if(smctr_debug > 10)
649 printk("%s: smctr_chg_rx_mask\n", dev->name);
651 smctr_enable_16bit(dev);
652 smctr_set_page(dev, (__u8 *)tp->ram_access);
654 if(tp->mode_bits & LOOPING_MODE_MASK)
655 tp->config_word0 |= RX_OWN_BIT;
656 else
657 tp->config_word0 &= ~RX_OWN_BIT;
659 if(tp->receive_mask & PROMISCUOUS_MODE)
660 tp->config_word0 |= PROMISCUOUS_BIT;
661 else
662 tp->config_word0 &= ~PROMISCUOUS_BIT;
664 if(tp->receive_mask & ACCEPT_ERR_PACKETS)
665 tp->config_word0 |= SAVBAD_BIT;
666 else
667 tp->config_word0 &= ~SAVBAD_BIT;
669 if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
670 tp->config_word0 |= RXATMAC;
671 else
672 tp->config_word0 &= ~RXATMAC;
674 if(tp->receive_mask & ACCEPT_MULTI_PROM)
675 tp->config_word1 |= MULTICAST_ADDRESS_BIT;
676 else
677 tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;
679 if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
680 tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
681 else
683 if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
684 tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
685 else
686 tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
689 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0,
690 &tp->config_word0)))
692 return (err);
695 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1,
696 &tp->config_word1)))
698 return (err);
701 smctr_disable_16bit(dev);
703 return (0);
706 static int smctr_clear_int(struct net_device *dev)
708 struct net_local *tp = (struct net_local *)dev->priv;
710 outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR);
712 return (0);
715 static int smctr_clear_trc_reset(int ioaddr)
717 __u8 r;
719 r = inb(ioaddr + MSR);
720 outb(~MSR_RST & r, ioaddr + MSR);
722 return (0);
/* The inverse routine to smctr_open(). */
728 static int smctr_close(struct net_device *dev)
730 struct net_local *tp = (struct net_local *)dev->priv;
731 struct sk_buff *skb;
732 int err;
734 netif_stop_queue(dev);
736 #ifdef MODULE
737 MOD_DEC_USE_COUNT;
738 #endif
740 tp->cleanup = 1;
742 /* Check to see if adapter is already in a closed state. */
743 if(tp->status != OPEN)
744 return (0);
746 smctr_enable_16bit(dev);
747 smctr_set_page(dev, (__u8 *)tp->ram_access);
749 if((err = smctr_issue_remove_cmd(dev)))
751 smctr_disable_16bit(dev);
752 return (err);
755 for(;;)
757 skb = skb_dequeue(&tp->SendSkbQueue);
758 if(skb == NULL)
759 break;
760 tp->QueueSkb++;
761 dev_kfree_skb(skb);
765 return (0);
768 static int smctr_decode_firmware(struct net_device *dev)
770 struct net_local *tp = (struct net_local *)dev->priv;
771 short bit = 0x80, shift = 12;
772 DECODE_TREE_NODE *tree;
773 short branch, tsize;
774 __u16 buff = 0;
775 long weight;
776 __u8 *ucode;
777 __u16 *mem;
779 if(smctr_debug > 10)
780 printk("%s: smctr_decode_firmware\n", dev->name);
782 weight = *(long *)(tp->ptr_ucode + WEIGHT_OFFSET);
783 tsize = *(__u8 *)(tp->ptr_ucode + TREE_SIZE_OFFSET);
784 tree = (DECODE_TREE_NODE *)(tp->ptr_ucode + TREE_OFFSET);
785 ucode = (__u8 *)(tp->ptr_ucode + TREE_OFFSET
786 + (tsize * sizeof(DECODE_TREE_NODE)));
787 mem = (__u16 *)(tp->ram_access);
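/* The firmware image referenced by tp->ptr_ucode is stored compressed:
 * a total bit count ("weight"), a decode tree of DECODE_TREE_NODE
 * entries, then the packed bitstream. Each input bit walks the tree from
 * ROOT via llink/rlink until a LEAF is reached; the leaf's 4-bit info
 * value is packed into a 16-bit word, high nibble first, and each
 * completed word is written byte-swapped into the adapter's control
 * store RAM.
 */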
789 while(weight)
791 branch = ROOT;
792 while((tree + branch)->tag != LEAF && weight)
794 branch = *ucode & bit ? (tree + branch)->llink
795 : (tree + branch)->rlink;
797 bit >>= 1;
798 weight--;
800 if(bit == 0)
802 bit = 0x80;
803 ucode++;
807 buff |= (tree + branch)->info << shift;
808 shift -= 4;
810 if(shift < 0)
812 *(mem++) = SWAP_BYTES(buff);
813 buff = 0;
814 shift = 12;
/* The following assumes the Control Store Memory has
 * been initialized to zero. If the last partial word
 * is zero, it will not be written.
 */
if(buff)
        *(mem++) = SWAP_BYTES(buff);

return (0);
}
828 static int smctr_disable_16bit(struct net_device *dev)
830 struct net_local *tp = (struct net_local *)dev->priv;
831 __u8 r;
833 if(tp->adapter_bus == BUS_ISA16_TYPE
834 && ((tp->adapter_flags & FORCED_16BIT_MODE) == 0))
836 r = inb(dev->base_addr + LAAR);
837 outb((r & ~LAAR_MEM16ENB), dev->base_addr + LAAR);
840 return (0);
/* On Exit, Adapter is:
 * 1. TRC is in a reset state and un-initialized.
 * 2. Adapter memory is enabled.
 * 3. Control Store memory is out of context (-WCSS is 1).
 */
849 static int smctr_disable_adapter_ctrl_store(struct net_device *dev)
851 struct net_local *tp = (struct net_local *)dev->priv;
852 int ioaddr = dev->base_addr;
854 if(smctr_debug > 10)
855 printk("%s: smctr_disable_adapter_ctrl_store\n", dev->name);
857 tp->trc_mask |= CSR_WCSS;
858 outb(tp->trc_mask, ioaddr + CSR);
860 return (0);
863 static int smctr_disable_adapter_ram(struct net_device *dev)
865 int ioaddr = dev->base_addr;
866 __u8 r;
868 /* First disable memory enable bit. */
869 r = inb(ioaddr + MSR);
870 outb(~MSR_MEMB & r, ioaddr + MSR);
872 /* Now disable 16 bit memory enable bit. */
873 r = inb(ioaddr + LAAR);
874 outb(~LAAR_MEM16ENB & r, ioaddr + LAAR);
876 return (0);
879 static int smctr_disable_bic_int(struct net_device *dev)
881 struct net_local *tp = (struct net_local *)dev->priv;
882 int ioaddr = dev->base_addr;
884 tp->trc_mask = CSR_MSK_ALL | CSR_MSKCBUSY
885 | CSR_MSKTINT | CSR_WCSS;
886 outb(tp->trc_mask, ioaddr + CSR);
888 return (0);
891 static int smctr_enable_16bit(struct net_device *dev)
893 struct net_local *tp = (struct net_local *)dev->priv;
894 __u8 r;
896 if(tp->adapter_bus == BUS_ISA16_TYPE)
898 r = inb(dev->base_addr + LAAR);
899 outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR);
902 return (0);
/* To enable the adapter control store memory:
 * 1. Adapter must be in a RESET state.
 * 2. Adapter memory must be enabled.
 * 3. Control Store Memory is in context (-WCSS is 0).
 */
911 static int smctr_enable_adapter_ctrl_store(struct net_device *dev)
913 struct net_local *tp = (struct net_local *)dev->priv;
914 int ioaddr = dev->base_addr;
916 if(smctr_debug > 10)
917 printk("%s: smctr_enable_adapter_ctrl_store\n", dev->name);
919 smctr_set_trc_reset(ioaddr);
920 smctr_enable_adapter_ram(dev);
922 tp->trc_mask &= ~CSR_WCSS;
923 outb(tp->trc_mask, ioaddr + CSR);
925 return (0);
928 static int smctr_enable_adapter_ram(struct net_device *dev)
930 int ioaddr = dev->base_addr;
931 __u8 r;
933 if(smctr_debug > 10)
934 printk("%s: smctr_enable_adapter_ram\n", dev->name);
936 r = inb(ioaddr + MSR);
937 outb(MSR_MEMB | r, ioaddr + MSR);
939 return (0);
942 static int smctr_enable_bic_int(struct net_device *dev)
944 struct net_local *tp = (struct net_local *)dev->priv;
945 int ioaddr = dev->base_addr;
946 __u8 r;
948 switch(tp->bic_type)
950 case (BIC_584_CHIP):
951 tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
952 outb(tp->trc_mask, ioaddr + CSR);
953 r = inb(ioaddr + IRR);
954 outb(r | IRR_IEN, ioaddr + IRR);
955 break;
957 case (BIC_594_CHIP):
958 tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
959 outb(tp->trc_mask, ioaddr + CSR);
960 r = inb(ioaddr + IMCCR);
961 outb(r | IMCCR_EIL, ioaddr + IMCCR);
962 break;
965 return (0);
968 static int __init smctr_chk_isa(struct net_device *dev)
970 struct net_local *tp = (struct net_local *)dev->priv;
971 int ioaddr = dev->base_addr;
972 __u8 r1, r2, b, chksum = 0;
973 __u16 r;
974 int i;
976 if(smctr_debug > 10)
977 printk("%s: smctr_chk_isa %#4x\n", dev->name, ioaddr);
979 if((ioaddr & 0x1F) != 0)
980 return (-ENODEV);
982 /* Checksum SMC node address */
983 for(i = 0; i < 8; i++)
985 b = inb(ioaddr + LAR0 + i);
986 chksum += b;
989 if(chksum != NODE_ADDR_CKSUM)
990 return (-ENODEV); /* Adapter Not Found */
992 /* Grab the region so that no one else tries to probe our ioports. */
993 request_region(ioaddr, SMCTR_IO_EXTENT, smctr_name);
995 b = inb(ioaddr + BDID);
997 /* Check for 8115T Board ID */
998 r2 = 0;
999 for(r = 0; r < 8; r++)
1001 r1 = inb(ioaddr + 0x8 + r);
1002 r2 += r1;
1005 /* value of RegF adds up the sum to 0xFF */
1006 if((r2 != 0xFF) && (r2 != 0xEE))
1007 return (-1);
1009 /* Get adapter ID */
1010 tp->board_id = smctr_get_boardid(dev, 0);
1011 switch(tp->board_id & 0xffff)
1013 case WD8115TA:
1014 smctr_model = "8115T/A";
1015 break;
1017 case WD8115T:
1018 smctr_model = "8115T";
1019 break;
1021 default:
1022 smctr_model = "Unknown";
1023 break;
1026 /* Store BIC type. */
1027 tp->bic_type = BIC_584_CHIP;
1028 tp->nic_type = NIC_825_CHIP;
1030 /* Copy Ram Size */
1031 tp->ram_usable = CNFG_SIZE_16KB;
1032 tp->ram_size = CNFG_SIZE_64KB;
1034 /* Get 58x Ram Base */
1035 r1 = inb(ioaddr);
1036 r1 &= 0x3F;
1038 r2 = inb(ioaddr + CNFG_LAAR_584);
1039 r2 &= CNFG_LAAR_MASK;
1040 r2 <<= 3;
1041 r2 |= ((r1 & 0x38) >> 3);
1043 tp->ram_base = ((__u32)r2 << 16) + (((__u32)(r1 & 0x7)) << 13);
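/* Illustrative example of the decode above (register values hypothetical):
 * each unit of r2 is worth 64KB and each of the low three r1 bits is worth
 * 8KB, so r2 == 0x0D and (r1 & 0x7) == 0x5 would give
 * ram_base = 0xD0000 + 0xA000 = 0xDA000.
 */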
1045 /* Get 584 Irq */
1046 r1 = 0;
1047 r1 = inb(ioaddr + CNFG_ICR_583);
1048 r1 &= CNFG_ICR_IR2_584;
1050 r2 = inb(ioaddr + CNFG_IRR_583);
1051 r2 &= CNFG_IRR_IRQS; /* 0x60 */
1052 r2 >>= 5;
1054 switch(r2)
1056 case 0:
1057 if(r1 == 0)
1058 dev->irq = 2;
1059 else
1060 dev->irq = 10;
1061 break;
1063 case 1:
1064 if(r1 == 0)
1065 dev->irq = 3;
1066 else
1067 dev->irq = 11;
1068 break;
1070 case 2:
1071 if(r1 == 0)
1073 if(tp->extra_info & ALTERNATE_IRQ_BIT)
1074 dev->irq = 5;
1075 else
1076 dev->irq = 4;
1078 else
1079 dev->irq = 15;
1080 break;
1082 case 3:
1083 if(r1 == 0)
1084 dev->irq = 7;
1085 else
1086 dev->irq = 4;
1087 break;
1089 default:
1090 printk("%s: No IRQ found aborting\n", dev->name);
1091 return(-1);
1094 if(request_irq(dev->irq, smctr_interrupt, SA_SHIRQ, smctr_name, dev))
1095 return (-ENODEV);
1097 /* Get 58x Rom Base */
1098 r1 = inb(ioaddr + CNFG_BIO_583);
1099 r1 &= 0x3E;
1100 r1 |= 0x40;
1102 tp->rom_base = (__u32)r1 << 13;
1104 /* Get 58x Rom Size */
1105 r1 = inb(ioaddr + CNFG_BIO_583);
1106 r1 &= 0xC0;
1107 if(r1 == 0)
1108 tp->rom_size = ROM_DISABLE;
1109 else
1111 r1 >>= 6;
1112 tp->rom_size = (__u16)CNFG_SIZE_8KB << r1;
1115 /* Get 58x Boot Status */
1116 r1 = inb(ioaddr + CNFG_GP2);
1118 tp->mode_bits &= (~BOOT_STATUS_MASK);
1120 if(r1 & CNFG_GP2_BOOT_NIBBLE)
1121 tp->mode_bits |= BOOT_TYPE_1;
1123 /* Get 58x Zero Wait State */
1124 tp->mode_bits &= (~ZERO_WAIT_STATE_MASK);
1126 r1 = inb(ioaddr + CNFG_IRR_583);
1128 if(r1 & CNFG_IRR_ZWS)
1129 tp->mode_bits |= ZERO_WAIT_STATE_8_BIT;
1131 if(tp->board_id & BOARD_16BIT)
1133 r1 = inb(ioaddr + CNFG_LAAR_584);
1135 if(r1 & CNFG_LAAR_ZWS)
1136 tp->mode_bits |= ZERO_WAIT_STATE_16_BIT;
1139 /* Get 584 Media Menu */
1140 tp->media_menu = 14;
1141 r1 = inb(ioaddr + CNFG_IRR_583);
1143 tp->mode_bits &= 0xf8ff; /* (~CNFG_INTERFACE_TYPE_MASK) */
1144 if((tp->board_id & TOKEN_MEDIA) == TOKEN_MEDIA)
1146 /* Get Advanced Features */
1147 if(((r1 & 0x6) >> 1) == 0x3)
1148 tp->media_type |= MEDIA_UTP_16;
1149 else
1151 if(((r1 & 0x6) >> 1) == 0x2)
1152 tp->media_type |= MEDIA_STP_16;
1153 else
1155 if(((r1 & 0x6) >> 1) == 0x1)
1156 tp->media_type |= MEDIA_UTP_4;
1158 else
1159 tp->media_type |= MEDIA_STP_4;
1163 r1 = inb(ioaddr + CNFG_GP2);
1164 if(!(r1 & 0x2) ) /* GP2_ETRD */
1165 tp->mode_bits |= EARLY_TOKEN_REL;
1167 /* see if the chip is corrupted */
1168 if(smctr_read_584_chksum(ioaddr))
1170 printk("%s: EEPROM Checksum Failure\n", dev->name);
1171 return(-1);
1175 return (0);
1178 static int __init smctr_get_boardid(struct net_device *dev, int mca)
1180 struct net_local *tp = (struct net_local *)dev->priv;
1181 int ioaddr = dev->base_addr;
1182 __u8 r, r1, IdByte;
1183 __u16 BoardIdMask;
1185 tp->board_id = BoardIdMask = 0;
1187 if(mca)
1189 BoardIdMask |= (MICROCHANNEL+INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
1190 tp->extra_info |= (INTERFACE_594_CHIP+RAM_SIZE_64K+NIC_825_BIT+ALTERNATE_IRQ_BIT+SLOT_16BIT);
1192 else
1194 BoardIdMask|=(INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
1195 tp->extra_info |= (INTERFACE_584_CHIP + RAM_SIZE_64K
1196 + NIC_825_BIT + ALTERNATE_IRQ_BIT);
1199 if(!mca)
1201 r = inb(ioaddr + BID_REG_1);
1202 r &= 0x0c;
1203 outb(r, ioaddr + BID_REG_1);
1204 r = inb(ioaddr + BID_REG_1);
1206 if(r & BID_SIXTEEN_BIT_BIT)
1208 tp->extra_info |= SLOT_16BIT;
1209 tp->adapter_bus = BUS_ISA16_TYPE;
1211 else
1212 tp->adapter_bus = BUS_ISA8_TYPE;
1214 else
1215 tp->adapter_bus = BUS_MCA_TYPE;
1217 /* Get Board Id Byte */
1218 IdByte = inb(ioaddr + BID_BOARD_ID_BYTE);
/* if Major version > 1.0 then
 *      return;
 */
1223 if(IdByte & 0xF8)
1224 return (-1);
1226 r1 = inb(ioaddr + BID_REG_1);
1227 r1 &= BID_ICR_MASK;
1228 r1 |= BID_OTHER_BIT;
1230 outb(r1, ioaddr + BID_REG_1);
1231 r1 = inb(ioaddr + BID_REG_3);
1233 r1 &= BID_EAR_MASK;
1234 r1 |= BID_ENGR_PAGE;
1236 outb(r1, ioaddr + BID_REG_3);
1237 r1 = inb(ioaddr + BID_REG_1);
1238 r1 &= BID_ICR_MASK;
1239 r1 |= (BID_RLA | BID_OTHER_BIT);
1241 outb(r1, ioaddr + BID_REG_1);
1243 r1 = inb(ioaddr + BID_REG_1);
1244 while(r1 & BID_RECALL_DONE_MASK)
1245 r1 = inb(ioaddr + BID_REG_1);
1247 r = inb(ioaddr + BID_LAR_0 + BID_REG_6);
1249 /* clear chip rev bits */
1250 tp->extra_info &= ~CHIP_REV_MASK;
1251 tp->extra_info |= ((r & BID_EEPROM_CHIP_REV_MASK) << 6);
1253 r1 = inb(ioaddr + BID_REG_1);
1254 r1 &= BID_ICR_MASK;
1255 r1 |= BID_OTHER_BIT;
1257 outb(r1, ioaddr + BID_REG_1);
1258 r1 = inb(ioaddr + BID_REG_3);
1260 r1 &= BID_EAR_MASK;
1261 r1 |= BID_EA6;
1263 outb(r1, ioaddr + BID_REG_3);
1264 r1 = inb(ioaddr + BID_REG_1);
1266 r1 &= BID_ICR_MASK;
1267 r1 |= BID_RLA;
1269 outb(r1, ioaddr + BID_REG_1);
1270 r1 = inb(ioaddr + BID_REG_1);
1272 while(r1 & BID_RECALL_DONE_MASK)
1273 r1 = inb(ioaddr + BID_REG_1);
1275 return (BoardIdMask);
1278 static int smctr_get_group_address(struct net_device *dev)
1280 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);
1282 return(smctr_wait_cmd(dev));
1285 static int smctr_get_functional_address(struct net_device *dev)
1287 smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);
1289 return(smctr_wait_cmd(dev));
/* Calculate number of Non-MAC receive BDB's and data buffers.
 * This function must simulate allocating shared memory exactly
 * as the allocate_shared_memory function above.
 */
1296 static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
1298 struct net_local *tp = (struct net_local *)dev->priv;
1299 unsigned int mem_used = 0;
1301 /* Allocate System Control Blocks. */
1302 mem_used += sizeof(SCGBlock);
1304 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1305 mem_used += sizeof(SCLBlock);
1307 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1308 mem_used += sizeof(ACBlock) * tp->num_acbs;
1310 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1311 mem_used += sizeof(ISBlock);
1313 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1314 mem_used += MISC_DATA_SIZE;
1316 /* Allocate transmit FCB's. */
1317 mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
1319 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE];
1320 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE];
1321 mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE];
1323 /* Allocate transmit BDBs. */
1324 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE];
1325 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE];
1326 mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE];
1328 /* Allocate receive FCBs. */
1329 mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE];
1330 mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE];
1332 /* Allocate receive BDBs. */
1333 mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE];
1335 /* Allocate MAC transmit buffers.
1336 * MAC transmit buffers don't have to be on an ODD Boundry.
1338 mem_used += tp->tx_buff_size[MAC_QUEUE];
1340 /* Allocate BUG transmit buffers. */
1341 mem_used += tp->tx_buff_size[BUG_QUEUE];
1343 /* Allocate MAC receive data buffers.
1344 * MAC receive buffers don't have to be on a 256 byte boundry.
1346 mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE];
1348 /* Allocate Non-MAC transmit buffers.
1349 * For maximum Netware performance, put Tx Buffers on
1350 * ODD Boundry,and then restore malloc to Even Boundrys.
1352 mem_used += 1L;
1353 mem_used += tp->tx_buff_size[NON_MAC_QUEUE];
1354 mem_used += 1L;
/* CALCULATE NUMBER OF NON-MAC RX BDB'S
 * AND NON-MAC RX DATA BUFFERS
 *
 * Make sure the mem_used offset at this point is the
 * same as in allocate_shared_memory, or the following
 * boundary adjustment will be incorrect (i.e. not allocating
 * the non-MAC receive buffers above cannot change the 256
 * byte offset).
 *
 * Since this cannot be guaranteed, adding the full 256 bytes
 * to the amount of shared memory used at this point guarantees
 * that the rx data buffers do not overflow shared memory.
 */
mem_used += 0x100;

return((0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock)));
}
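/* Worked example of the calculation above (buffer/descriptor sizes are
 * assumptions for illustration only): with RX_DATA_BUFFER_SIZE == 256,
 * a 16-byte BDBlock and mem_used == 0x9000, the remaining
 * 0xffff - 0x9000 = 0x6fff bytes of the 64KB window hold
 * 0x6fff / 272 = 105 non-MAC receive BDB/buffer pairs.
 */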
1374 static int smctr_get_physical_drop_number(struct net_device *dev)
1376 smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);
1378 return(smctr_wait_cmd(dev));
1381 static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
1383 struct net_local *tp = (struct net_local *)dev->priv;
1384 BDBlock *bdb;
1386 bdb = (BDBlock *)((__u32)tp->ram_access
1387 + (__u32)(tp->rx_fcb_curr[queue]->trc_bdb_ptr));
1389 tp->rx_fcb_curr[queue]->bdb_ptr = bdb;
1391 return ((__u8 *)bdb->data_block_ptr);
1394 static int smctr_get_station_id(struct net_device *dev)
1396 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);
1398 return(smctr_wait_cmd(dev));
/* Get the current statistics. This may be called with the card open
 * or closed.
 */
1405 static struct net_device_stats *smctr_get_stats(struct net_device *dev)
1407 struct net_local *tp = (struct net_local *)dev->priv;
1409 return ((struct net_device_stats *)&tp->MacStat);
1412 static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
1413 __u16 bytes_count)
1415 struct net_local *tp = (struct net_local *)dev->priv;
1416 FCBlock *pFCB;
1417 BDBlock *pbdb;
1418 unsigned short alloc_size;
1419 unsigned short *temp;
1421 if(smctr_debug > 20)
1422 printk("smctr_get_tx_fcb\n");
/* check if there are enough FCB blocks */
1425 if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue])
1426 return ((FCBlock *)(-1L));
1428 /* round off the input pkt size to the nearest even number */
1429 alloc_size = (bytes_count + 1) & 0xfffe;
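/* e.g. a 61-byte request reserves 62 bytes; a 62-byte request also
 * reserves 62 bytes (already even).
 */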
1431 /* check if enough mem */
1432 if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue])
1433 return ((FCBlock *)(-1L));
/* Check if past the end;
 * if there is exactly enough memory to the end of the ring, allocate
 * from the front. This avoids an update of curr when curr == end.
 */
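/* Illustration (sizes hypothetical): if only 100 bytes remain before
 * tx_buff_end but 120 are needed, those 100 tail bytes are added to
 * alloc_size (and so charged to tx_buff_used), the free-space check is
 * repeated, and the allocation restarts at tx_buff_head.
 */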
1439 if(((unsigned long)(tp->tx_buff_curr[queue]) + alloc_size)
1440 >= (unsigned long)(tp->tx_buff_end[queue]))
1442 /* check if enough memory from ring head */
1443 alloc_size = alloc_size +
1444 (__u16)((__u32)tp->tx_buff_end[queue]
1445 - (__u32)tp->tx_buff_curr[queue]);
1447 if((tp->tx_buff_used[queue] + alloc_size)
1448 > tp->tx_buff_size[queue])
1450 return ((FCBlock *)(-1L));
1453 /* ring wrap */
1454 tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];
1457 tp->tx_buff_used[queue] += alloc_size;
1458 tp->num_tx_fcbs_used[queue]++;
1459 tp->tx_fcb_curr[queue]->frame_length = bytes_count;
1460 tp->tx_fcb_curr[queue]->memory_alloc = alloc_size;
1461 temp = tp->tx_buff_curr[queue];
1462 tp->tx_buff_curr[queue]
1463 = (__u16 *)((__u32)temp + (__u32)((bytes_count + 1) & 0xfffe));
1465 pbdb = tp->tx_fcb_curr[queue]->bdb_ptr;
1466 pbdb->buffer_length = bytes_count;
1467 pbdb->data_block_ptr = temp;
1468 pbdb->trc_data_block_ptr = TRC_POINTER(temp);
1470 pFCB = tp->tx_fcb_curr[queue];
1471 tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr;
1473 return (pFCB);
1476 static int smctr_get_upstream_neighbor_addr(struct net_device *dev)
1478 smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS);
1480 return(smctr_wait_cmd(dev));
1483 static int smctr_hardware_send_packet(struct net_device *dev,
1484 struct net_local *tp)
1486 struct tr_statistics *tstat = &tp->MacStat;
1487 struct sk_buff *skb;
1488 FCBlock *fcb;
1490 if(smctr_debug > 10)
1491 printk("%s: smctr_hardware_send_packet\n", dev->name);
1493 if(tp->status != OPEN)
1494 return (-1);
1496 if(tp->monitor_state_ready != 1)
1497 return (-1);
1499 for(;;)
1501 /* Send first buffer from queue */
1502 skb = skb_dequeue(&tp->SendSkbQueue);
1503 if(skb == NULL)
1504 return (-1);
1506 tp->QueueSkb++;
1508 if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size) return (-1);
1510 smctr_enable_16bit(dev);
1511 smctr_set_page(dev, (__u8 *)tp->ram_access);
1513 if((fcb = smctr_get_tx_fcb(dev, NON_MAC_QUEUE, skb->len))
1514 == (FCBlock *)(-1L))
1516 smctr_disable_16bit(dev);
1517 return (-1);
1520 smctr_tx_move_frame(dev, skb,
1521 (__u8 *)fcb->bdb_ptr->data_block_ptr, skb->len);
1523 smctr_set_page(dev, (__u8 *)fcb);
1525 smctr_trc_send_packet(dev, fcb, NON_MAC_QUEUE);
1526 dev_kfree_skb(skb);
1528 tstat->tx_packets++;
1530 smctr_disable_16bit(dev);
1533 return (0);
1536 static int smctr_init_acbs(struct net_device *dev)
1538 struct net_local *tp = (struct net_local *)dev->priv;
1539 unsigned int i;
1540 ACBlock *acb;
1542 if(smctr_debug > 10)
1543 printk("%s: smctr_init_acbs\n", dev->name);
1545 acb = tp->acb_head;
1546 acb->cmd_done_status = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
1547 acb->cmd_info = ACB_CHAIN_END;
1548 acb->cmd = 0;
1549 acb->subcmd = 0;
1550 acb->data_offset_lo = 0;
1551 acb->data_offset_hi = 0;
1552 acb->next_ptr
1553 = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
1554 acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
1556 for(i = 1; i < tp->num_acbs; i++)
1558 acb = acb->next_ptr;
1559 acb->cmd_done_status
1560 = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
1561 acb->cmd_info = ACB_CHAIN_END;
1562 acb->cmd = 0;
1563 acb->subcmd = 0;
1564 acb->data_offset_lo = 0;
1565 acb->data_offset_hi = 0;
1566 acb->next_ptr
1567 = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
1568 acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
1571 acb->next_ptr = tp->acb_head;
1572 acb->trc_next_ptr = TRC_POINTER(tp->acb_head);
1573 tp->acb_next = tp->acb_head->next_ptr;
1574 tp->acb_curr = tp->acb_head->next_ptr;
1575 tp->num_acbs_used = 0;
1577 return (0);
1580 static int smctr_init_adapter(struct net_device *dev)
1582 struct net_local *tp = (struct net_local *)dev->priv;
1583 int err;
1585 if(smctr_debug > 10)
1586 printk("%s: smctr_init_adapter\n", dev->name);
1588 tp->status = CLOSED;
1589 tp->page_offset_mask = (tp->ram_usable * 1024) - 1;
1590 skb_queue_head_init(&tp->SendSkbQueue);
1591 tp->QueueSkb = MAX_TX_QUEUE;
1593 if(!(tp->group_address_0 & 0x0080))
1594 tp->group_address_0 |= 0x00C0;
1596 if(!(tp->functional_address_0 & 0x00C0))
1597 tp->functional_address_0 |= 0x00C0;
1599 tp->functional_address[0] &= 0xFF7F;
1601 if(tp->authorized_function_classes == 0)
1602 tp->authorized_function_classes = 0x7FFF;
1604 if(tp->authorized_access_priority == 0)
1605 tp->authorized_access_priority = 0x06;
1607 smctr_disable_bic_int(dev);
1608 smctr_set_trc_reset(dev->base_addr);
/* By default the adapter will operate in 16-bit mode only. If
 * there are two or more adapters in a box, switching between
 * 16-bit and 8-bit mode may cause problems. In short, the adapters
 * will interfere with each other. XXX - smc.
 */
1615 smctr_disable_adapter_ram(dev);
1616 if((err = smctr_rom_conflict_test(dev)))
1617 return (err);
1619 if((err = smctr_ram_conflict_test(dev)))
1620 return (err);
1622 smctr_enable_adapter_ram(dev);
1623 smctr_enable_16bit(dev);
1624 smctr_set_page(dev, (__u8 *)tp->ram_access);
1626 if(smctr_checksum_firmware(dev))
1627 return (UCODE_NOT_PRESENT);
1629 if((err = smctr_ram_memory_test(dev)))
1630 return (err);
1632 smctr_enable_16bit(dev);
1633 if(smctr_checksum_firmware(dev))
1634 return (-1);
1636 if((err = smctr_ram_memory_test(dev)))
1637 return (-1);
1639 smctr_set_rx_look_ahead(dev);
1640 smctr_load_node_addr(dev);
1642 /* Initialize adapter for Internal Self Test. */
1643 smctr_reset_adapter(dev);
1645 if((err = smctr_init_card_real(dev)))
1646 return (err);
1648 /* This routine clobbers the TRC's internal registers. */
1649 if((err = smctr_internal_self_test(dev)))
1650 return (err);
1652 /* Re-Initialize adapter's internal registers */
1653 smctr_reset_adapter(dev);
1655 if((err = smctr_init_card_real(dev)))
1656 return (err);
1658 smctr_enable_bic_int(dev);
1660 if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
1661 return (err);
1663 smctr_disable_16bit(dev);
1665 return (0);
1668 /* Dummy function */
1669 static int __init smctr_init_card(struct net_device *dev)
1671 if(smctr_debug > 10)
1672 printk("%s: smctr_init_card\n", dev->name);
1674 return (0);
1677 static int smctr_init_card_real(struct net_device *dev)
1679 struct net_local *tp = (struct net_local *)dev->priv;
1680 int err = 0;
1682 if(smctr_debug > 10)
1683 printk("%s: smctr_init_card_real\n", dev->name);
1685 tp->sh_mem_used = 0;
1686 tp->num_acbs = NUM_OF_ACBS;
1688 /* Range Check Max Packet Size */
1689 if(tp->max_packet_size < 256)
1690 tp->max_packet_size = 256;
1691 else
1693 if(tp->max_packet_size > NON_MAC_TX_BUFFER_MEMORY)
1694 tp->max_packet_size = NON_MAC_TX_BUFFER_MEMORY;
1697 tp->num_of_tx_buffs = (NON_MAC_TX_BUFFER_MEMORY
1698 / tp->max_packet_size) - 1;
1700 if(tp->num_of_tx_buffs > NUM_NON_MAC_TX_FCBS)
1701 tp->num_of_tx_buffs = NUM_NON_MAC_TX_FCBS;
1702 else
1704 if(tp->num_of_tx_buffs == 0)
1705 tp->num_of_tx_buffs = 1;
1708 /* Tx queue constants */
1709 tp->num_tx_fcbs [BUG_QUEUE] = NUM_BUG_TX_FCBS;
1710 tp->num_tx_bdbs [BUG_QUEUE] = NUM_BUG_TX_BDBS;
1711 tp->tx_buff_size [BUG_QUEUE] = BUG_TX_BUFFER_MEMORY;
1712 tp->tx_buff_used [BUG_QUEUE] = 0;
1713 tp->tx_queue_status [BUG_QUEUE] = NOT_TRANSMITING;
1715 tp->num_tx_fcbs [MAC_QUEUE] = NUM_MAC_TX_FCBS;
1716 tp->num_tx_bdbs [MAC_QUEUE] = NUM_MAC_TX_BDBS;
1717 tp->tx_buff_size [MAC_QUEUE] = MAC_TX_BUFFER_MEMORY;
1718 tp->tx_buff_used [MAC_QUEUE] = 0;
1719 tp->tx_queue_status [MAC_QUEUE] = NOT_TRANSMITING;
1721 tp->num_tx_fcbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_FCBS;
1722 tp->num_tx_bdbs [NON_MAC_QUEUE] = NUM_NON_MAC_TX_BDBS;
1723 tp->tx_buff_size [NON_MAC_QUEUE] = NON_MAC_TX_BUFFER_MEMORY;
1724 tp->tx_buff_used [NON_MAC_QUEUE] = 0;
1725 tp->tx_queue_status [NON_MAC_QUEUE] = NOT_TRANSMITING;
1727 /* Receive Queue Constants */
1728 tp->num_rx_fcbs[MAC_QUEUE] = NUM_MAC_RX_FCBS;
1729 tp->num_rx_bdbs[MAC_QUEUE] = NUM_MAC_RX_BDBS;
1731 if(tp->extra_info & CHIP_REV_MASK)
1732 tp->num_rx_fcbs[NON_MAC_QUEUE] = 78; /* 825 Rev. XE */
1733 else
1734 tp->num_rx_fcbs[NON_MAC_QUEUE] = 7; /* 825 Rev. XD */
1736 tp->num_rx_bdbs[NON_MAC_QUEUE] = smctr_get_num_rx_bdbs(dev);
1738 smctr_alloc_shared_memory(dev);
1739 smctr_init_shared_memory(dev);
1741 if((err = smctr_issue_init_timers_cmd(dev)))
1742 return (err);
1744 if((err = smctr_issue_init_txrx_cmd(dev)))
1745 return (err);
1747 return (0);
1750 static int smctr_init_rx_bdbs(struct net_device *dev)
1752 struct net_local *tp = (struct net_local *)dev->priv;
1753 unsigned int i, j;
1754 BDBlock *bdb;
1755 __u16 *buf;
1757 if(smctr_debug > 10)
1758 printk("%s: smctr_init_rx_bdbs\n", dev->name);
1760 for(i = 0; i < NUM_RX_QS_USED; i++)
1762 bdb = tp->rx_bdb_head[i];
1763 buf = tp->rx_buff_head[i];
1764 bdb->info = (BDB_CHAIN_END | BDB_NO_WARNING);
1765 bdb->buffer_length = RX_DATA_BUFFER_SIZE;
1766 bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
1767 bdb->data_block_ptr = buf;
1768 bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1770 if(i == NON_MAC_QUEUE)
1771 bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
1772 else
1773 bdb->trc_data_block_ptr = TRC_POINTER(buf);
1775 for(j = 1; j < tp->num_rx_bdbs[i]; j++)
1777 bdb->next_ptr->back_ptr = bdb;
1778 bdb = bdb->next_ptr;
1779 buf = (__u16 *)((char *)buf + RX_DATA_BUFFER_SIZE);
1780 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
1781 bdb->buffer_length = RX_DATA_BUFFER_SIZE;
1782 bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
1783 bdb->data_block_ptr = buf;
1784 bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1786 if(i == NON_MAC_QUEUE)
1787 bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
1788 else
1789 bdb->trc_data_block_ptr = TRC_POINTER(buf);
1792 bdb->next_ptr = tp->rx_bdb_head[i];
1793 bdb->trc_next_ptr = TRC_POINTER(tp->rx_bdb_head[i]);
1795 tp->rx_bdb_head[i]->back_ptr = bdb;
1796 tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr;
1799 return (0);
1802 static int smctr_init_rx_fcbs(struct net_device *dev)
1804 struct net_local *tp = (struct net_local *)dev->priv;
1805 unsigned int i, j;
1806 FCBlock *fcb;
1808 for(i = 0; i < NUM_RX_QS_USED; i++)
1810 fcb = tp->rx_fcb_head[i];
1811 fcb->frame_status = 0;
1812 fcb->frame_length = 0;
1813 fcb->info = FCB_CHAIN_END;
1814 fcb->next_ptr = (FCBlock *)(((char*)fcb) + sizeof(FCBlock));
1815 if(i == NON_MAC_QUEUE)
1816 fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
1817 else
1818 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1820 for(j = 1; j < tp->num_rx_fcbs[i]; j++)
1822 fcb->next_ptr->back_ptr = fcb;
1823 fcb = fcb->next_ptr;
1824 fcb->frame_status = 0;
1825 fcb->frame_length = 0;
1826 fcb->info = FCB_WARNING;
1827 fcb->next_ptr
1828 = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1830 if(i == NON_MAC_QUEUE)
1831 fcb->trc_next_ptr
1832 = RX_FCB_TRC_POINTER(fcb->next_ptr);
1833 else
1834 fcb->trc_next_ptr
1835 = TRC_POINTER(fcb->next_ptr);
1838 fcb->next_ptr = tp->rx_fcb_head[i];
1840 if(i == NON_MAC_QUEUE)
1841 fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
1842 else
1843 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1845 tp->rx_fcb_head[i]->back_ptr = fcb;
1846 tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr;
1849 return(0);
1852 static int smctr_init_shared_memory(struct net_device *dev)
1854 struct net_local *tp = (struct net_local *)dev->priv;
1855 unsigned int i;
1856 __u32 *iscpb;
1858 if(smctr_debug > 10)
1859 printk("%s: smctr_init_shared_memory\n", dev->name);
1861 smctr_set_page(dev, (__u8 *)(unsigned int)tp->iscpb_ptr);
1863 /* Initialize Initial System Configuration Point. (ISCP) */
1864 iscpb = (__u32 *)PAGE_POINTER(&tp->iscpb_ptr->trc_scgb_ptr);
1865 *iscpb = (__u32)(SWAP_WORDS(TRC_POINTER(tp->scgb_ptr)));
1867 smctr_set_page(dev, (__u8 *)tp->ram_access);
1869 /* Initialize System Configuration Pointers. (SCP) */
1870 tp->scgb_ptr->config = (SCGB_ADDRESS_POINTER_FORMAT
1871 | SCGB_MULTI_WORD_CONTROL | SCGB_DATA_FORMAT
1872 | SCGB_BURST_LENGTH);
1874 tp->scgb_ptr->trc_sclb_ptr = TRC_POINTER(tp->sclb_ptr);
1875 tp->scgb_ptr->trc_acb_ptr = TRC_POINTER(tp->acb_head);
1876 tp->scgb_ptr->trc_isb_ptr = TRC_POINTER(tp->isb_ptr);
1877 tp->scgb_ptr->isbsiz = (sizeof(ISBlock)) - 2;
1879 /* Initialize System Control Block. (SCB) */
1880 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_NOP;
1881 tp->sclb_ptr->iack_code = 0;
1882 tp->sclb_ptr->resume_control = 0;
1883 tp->sclb_ptr->int_mask_control = 0;
1884 tp->sclb_ptr->int_mask_state = 0;
1886 /* Initialize Interrupt Status Block. (ISB) */
1887 for(i = 0; i < NUM_OF_INTERRUPTS; i++)
1889 tp->isb_ptr->IStatus[i].IType = 0xf0;
1890 tp->isb_ptr->IStatus[i].ISubtype = 0;
1893 tp->current_isb_index = 0;
1895 /* Initialize Action Command Block. (ACB) */
1896 smctr_init_acbs(dev);
1898 /* Initialize transmit FCB's and BDB's. */
1899 smctr_link_tx_fcbs_to_bdbs(dev);
1900 smctr_init_tx_bdbs(dev);
1901 smctr_init_tx_fcbs(dev);
1903 /* Initialize receive FCB's and BDB's. */
1904 smctr_init_rx_bdbs(dev);
1905 smctr_init_rx_fcbs(dev);
1907 return (0);
1910 static int smctr_init_tx_bdbs(struct net_device *dev)
1912 struct net_local *tp = (struct net_local *)dev->priv;
1913 unsigned int i, j;
1914 BDBlock *bdb;
1916 for(i = 0; i < NUM_TX_QS_USED; i++)
1918 bdb = tp->tx_bdb_head[i];
1919 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
1920 bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
1921 bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1923 for(j = 1; j < tp->num_tx_bdbs[i]; j++)
1925 bdb->next_ptr->back_ptr = bdb;
1926 bdb = bdb->next_ptr;
1927 bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
bdb->next_ptr
        = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
1932 bdb->next_ptr = tp->tx_bdb_head[i];
1933 bdb->trc_next_ptr = TRC_POINTER(tp->tx_bdb_head[i]);
1934 tp->tx_bdb_head[i]->back_ptr = bdb;
1937 return (0);
1940 static int smctr_init_tx_fcbs(struct net_device *dev)
1942 struct net_local *tp = (struct net_local *)dev->priv;
1943 unsigned int i, j;
1944 FCBlock *fcb;
1946 for(i = 0; i < NUM_TX_QS_USED; i++)
1948 fcb = tp->tx_fcb_head[i];
1949 fcb->frame_status = 0;
1950 fcb->frame_length = 0;
1951 fcb->info = FCB_CHAIN_END;
1952 fcb->next_ptr = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1953 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1955 for(j = 1; j < tp->num_tx_fcbs[i]; j++)
1957 fcb->next_ptr->back_ptr = fcb;
1958 fcb = fcb->next_ptr;
1959 fcb->frame_status = 0;
1960 fcb->frame_length = 0;
1961 fcb->info = FCB_CHAIN_END;
1962 fcb->next_ptr
1963 = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
1964 fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
1967 fcb->next_ptr = tp->tx_fcb_head[i];
1968 fcb->trc_next_ptr = TRC_POINTER(tp->tx_fcb_head[i]);
1970 tp->tx_fcb_head[i]->back_ptr = fcb;
1971 tp->tx_fcb_end[i] = tp->tx_fcb_head[i]->next_ptr;
1972 tp->tx_fcb_curr[i] = tp->tx_fcb_head[i]->next_ptr;
1973 tp->num_tx_fcbs_used[i] = 0;
1976 return (0);
1979 static int smctr_internal_self_test(struct net_device *dev)
1981 struct net_local *tp = (struct net_local *)dev->priv;
1982 int err;
1984 if((err = smctr_issue_test_internal_rom_cmd(dev)))
1985 return (err);
1987 if((err = smctr_wait_cmd(dev)))
1988 return (err);
1990 if(tp->acb_head->cmd_done_status & 0xff)
1991 return (-1);
1993 if((err = smctr_issue_test_hic_cmd(dev)))
1994 return (err);
1996 if((err = smctr_wait_cmd(dev)))
1997 return (err);
1999 if(tp->acb_head->cmd_done_status & 0xff)
2000 return (-1);
2002 if((err = smctr_issue_test_mac_reg_cmd(dev)))
2003 return (err);
2005 if((err = smctr_wait_cmd(dev)))
2006 return (err);
2008 if(tp->acb_head->cmd_done_status & 0xff)
2009 return (-1);
2011 return (0);
/* The typical workload of the driver:
 * Handle the network interface interrupts.
 */
2017 static void smctr_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2019 struct net_device *dev = dev_id;
2020 struct net_local *tp;
2021 int ioaddr;
2022 __u16 interrupt_unmask_bits = 0, interrupt_ack_code = 0xff00;
2023 __u16 err1, err = NOT_MY_INTERRUPT;
2024 __u8 isb_type, isb_subtype;
2025 __u16 isb_index;
2027 if(dev == NULL)
2029 printk("%s: irq %d for unknown device.\n", dev->name, irq);
2030 return;
2033 ioaddr = dev->base_addr;
2034 tp = (struct net_local *)dev->priv;
2036 if(tp->status == NOT_INITIALIZED)
2037 return;
2039 smctr_disable_bic_int(dev);
2040 smctr_enable_16bit(dev);
2042 smctr_clear_int(dev);
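/* Summary of the ISB loop below: a pending Interrupt Status Block entry
 * has the high nibble of IType clear. Entries are consumed in order,
 * current_isb_index wraps at NUM_OF_INTERRUPTS, each handled entry is
 * marked by OR-ing 0xf0 into its IType, and its index is recorded in
 * interrupt_ack_code for the TRC interrupt acknowledge.
 */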
2044 /* First read the LSB */
2045 while((tp->isb_ptr->IStatus[tp->current_isb_index].IType & 0xf0) == 0)
2047 isb_index = tp->current_isb_index;
2048 isb_type = tp->isb_ptr->IStatus[isb_index].IType;
2049 isb_subtype = tp->isb_ptr->IStatus[isb_index].ISubtype;
2051 (tp->current_isb_index)++;
2052 if(tp->current_isb_index == NUM_OF_INTERRUPTS)
2053 tp->current_isb_index = 0;
2055 if(isb_type >= 0x10)
2057 smctr_disable_16bit(dev);
2058 return;
2061 err = HARDWARE_FAILED;
2062 interrupt_ack_code = isb_index;
2063 tp->isb_ptr->IStatus[isb_index].IType |= 0xf0;
2065 interrupt_unmask_bits |= (1 << (__u16)isb_type);
2067 switch(isb_type)
2069 case ISB_IMC_MAC_TYPE_3:
2070 smctr_disable_16bit(dev);
2072 switch(isb_subtype)
2074 case 0:
2075 tp->monitor_state
2076 = MS_MONITOR_FSM_INACTIVE;
2077 break;
2079 case 1:
2080 tp->monitor_state
2081 = MS_REPEAT_BEACON_STATE;
2082 break;
2084 case 2:
2085 tp->monitor_state
2086 = MS_REPEAT_CLAIM_TOKEN_STATE;
2087 break;
2089 case 3:
2090 tp->monitor_state
2091 = MS_TRANSMIT_CLAIM_TOKEN_STATE;
2092 break;
2093 case 4:
2094 tp->monitor_state
2095 = MS_STANDBY_MONITOR_STATE;
2096 break;
2098 case 5:
2099 tp->monitor_state
2100 = MS_TRANSMIT_BEACON_STATE;
2101 break;
2103 case 6:
2104 tp->monitor_state
2105 = MS_ACTIVE_MONITOR_STATE;
2106 break;
2108 case 7:
2109 tp->monitor_state
2110 = MS_TRANSMIT_RING_PURGE_STATE;
2111 break;
2113 case 8: /* diagnostic state */
2114 break;
2116 case 9:
2117 tp->monitor_state
2118 = MS_BEACON_TEST_STATE;
2119 if(smctr_lobe_media_test(dev))
2121 tp->ring_status_flags
2122 = RING_STATUS_CHANGED;
2123 tp->ring_status
2124 = AUTO_REMOVAL_ERROR;
2125 smctr_ring_status_chg(dev);
2126 smctr_bypass_state(dev);
2128 else
2129 smctr_issue_insert_cmd(dev);
2130 break;
2132 /* case 0x0a-0xff, illegal states */
2133 default:
2134 break;
2137 tp->ring_status_flags = MONITOR_STATE_CHANGED;
2138 err = smctr_ring_status_chg(dev);
2140 smctr_enable_16bit(dev);
2141 break;
2143 /* Type 0x02 - MAC Error Counters Interrupt
2144 * One or more MAC Error Counter is half full
2145 * MAC Error Counters
2146 * Lost_FR_Error_Counter
2147 * RCV_Congestion_Counter
2148 * FR_copied_Error_Counter
2149 * FREQ_Error_Counter
2150 * Token_Error_Counter
2151 * Line_Error_Counter
2152 * Internal_Error_Count
2154 case ISB_IMC_MAC_ERROR_COUNTERS:
2155 /* Read 802.5 Error Counters */
2156 err = smctr_issue_read_ring_status_cmd(dev);
2157 break;
2159 /* Type 0x04 - MAC Type 2 Interrupt
2160 * HOST needs to enqueue MAC Frame for transmission
2161 * SubType Bit 15 - RQ_INIT_PDU (Request Initialization); changed from RQ_INIT_PDU to
2162 * TRC_Status_Changed_Indicate
2164 case ISB_IMC_MAC_TYPE_2:
2165 err = smctr_issue_read_ring_status_cmd(dev);
2166 break;
2169 /* Type 0x05 - TX Frame Interrupt (FI). */
2170 case ISB_IMC_TX_FRAME:
2171 /* BUG QUEUE for TRC stuck receive BUG */
2172 if(isb_subtype & TX_PENDING_PRIORITY_2)
2174 if((err = smctr_tx_complete(dev,
2175 BUG_QUEUE)) != SUCCESS)
2176 break;
2179 /* NON-MAC frames only */
2180 if(isb_subtype & TX_PENDING_PRIORITY_1)
2182 if((err = smctr_tx_complete(dev,
2183 NON_MAC_QUEUE)) != SUCCESS)
2184 break;
2187 /* MAC frames only */
2188 if(isb_subtype & TX_PENDING_PRIORITY_0)
2189 err = smctr_tx_complete(dev, MAC_QUEUE);
2190 break;
2191 /* Type 0x06 - TX END OF QUEUE (FE) */
2192 case ISB_IMC_END_OF_TX_QUEUE:
2193 /* BUG queue */
2194 if(isb_subtype & TX_PENDING_PRIORITY_2)
2196 /* OK to clear the receive FIFO overrun
2197 * interrupt mask; send_BUG now completes.
2199 interrupt_unmask_bits |= 0x800;
2201 tp->tx_queue_status[BUG_QUEUE]
2202 = NOT_TRANSMITING;
2203 if((err = smctr_tx_complete(dev,
2204 BUG_QUEUE)) != SUCCESS)
2205 break;
2206 if((err = smctr_restart_tx_chain(dev,
2207 BUG_QUEUE)) != SUCCESS)
2208 break;
2211 /* NON-MAC queue only */
2212 if(isb_subtype & TX_PENDING_PRIORITY_1)
2214 tp->tx_queue_status[NON_MAC_QUEUE]
2215 = NOT_TRANSMITING;
2216 if((err = smctr_tx_complete(dev,
2217 NON_MAC_QUEUE)) != SUCCESS)
2218 break;
2219 if((err = smctr_restart_tx_chain(dev,
2220 NON_MAC_QUEUE)) != SUCCESS)
2221 break;
2224 /* MAC queue only */
2225 if(isb_subtype & TX_PENDING_PRIORITY_0)
2227 tp->tx_queue_status[MAC_QUEUE]
2228 = NOT_TRANSMITING;
2229 if((err = smctr_tx_complete(dev,
2230 MAC_QUEUE)) != SUCCESS)
2231 break;
2233 err = smctr_restart_tx_chain(dev,
2234 MAC_QUEUE);
2236 break;
2238 /* Type 0x07 - NON-MAC RX Resource Interrupt
2239 * Subtype bit 12 - (BW) BDB warning
2240 * Subtype bit 13 - (FW) FCB warning
2241 * Subtype bit 14 - (BE) BDB End of chain
2242 * Subtype bit 15 - (FE) FCB End of chain
2244 case ISB_IMC_NON_MAC_RX_RESOURCE:
2245 tp->rx_fifo_overrun_count = 0;
2246 tp->receive_queue_number = NON_MAC_QUEUE;
2247 err1 = smctr_rx_frame(dev);
2249 if(isb_subtype & NON_MAC_RX_RESOURCE_FE)
2251 if((err = smctr_issue_resume_rx_fcb_cmd(dev, NON_MAC_QUEUE)) != SUCCESS)
2252 break;
2253 if(tp->ptr_rx_fcb_overruns)
2254 (*tp->ptr_rx_fcb_overruns)++;
2257 if(isb_subtype & NON_MAC_RX_RESOURCE_BE)
2259 if((err = smctr_issue_resume_rx_bdb_cmd(dev, NON_MAC_QUEUE)) != SUCCESS)
2260 break;
2261 if(tp->ptr_rx_bdb_overruns)
2262 (*tp->ptr_rx_bdb_overruns)++;
2264 err = err1;
2265 break;
2267 /* Type 0x08 - MAC RX Resource Interrupt
2268 * Subtype bit 12 - (BW) BDB warning
2269 * Subtype bit 13 - (FW) FCB warning
2270 * Subtype bit 14 - (BE) BDB End of chain
2271 * Subtype bit 15 - (FE) FCB End of chain
2273 case ISB_IMC_MAC_RX_RESOURCE:
2274 tp->receive_queue_number = MAC_QUEUE;
2275 err1 = smctr_rx_frame(dev);
2277 if(isb_subtype & MAC_RX_RESOURCE_FE)
2279 if((err = smctr_issue_resume_rx_fcb_cmd( dev, MAC_QUEUE)) != SUCCESS)
2280 break;
2282 if(tp->ptr_rx_fcb_overruns)
2283 (*tp->ptr_rx_fcb_overruns)++;
2286 if(isb_subtype & MAC_RX_RESOURCE_BE)
2288 if((err = smctr_issue_resume_rx_bdb_cmd( dev, MAC_QUEUE)) != SUCCESS)
2289 break;
2291 if(tp->ptr_rx_bdb_overruns)
2292 (*tp->ptr_rx_bdb_overruns)++;
2294 err = err1;
2295 break;
2297 /* Type 0x09 - NON_MAC RX Frame Interrupt */
2298 case ISB_IMC_NON_MAC_RX_FRAME:
2299 tp->rx_fifo_overrun_count = 0;
2300 tp->receive_queue_number = NON_MAC_QUEUE;
2301 err = smctr_rx_frame(dev);
2302 break;
2304 /* Type 0x0A - MAC RX Frame Interrupt */
2305 case ISB_IMC_MAC_RX_FRAME:
2306 tp->receive_queue_number = MAC_QUEUE;
2307 err = smctr_rx_frame(dev);
2308 break;
2310 /* Type 0x0B - TRC status
2311 * TRC has encountered an error condition
2312 * subtype bit 14 - transmit FIFO underrun
2313 * subtype bit 15 - receive FIFO overrun
2315 case ISB_IMC_TRC_FIFO_STATUS:
2316 if(isb_subtype & TRC_FIFO_STATUS_TX_UNDERRUN)
2318 if(tp->ptr_tx_fifo_underruns)
2319 (*tp->ptr_tx_fifo_underruns)++;
2322 if(isb_subtype & TRC_FIFO_STATUS_RX_OVERRUN)
2324 /* Update the stuck-receive overrun counter;
2325 * if it reaches 3, the condition has to be cleared
2326 * by sending back-to-back frames. We pick
2327 * DAT (duplicate address test MAC frame).
2329 tp->rx_fifo_overrun_count++;
2331 if(tp->rx_fifo_overrun_count >= 3)
2333 tp->rx_fifo_overrun_count = 0;
2335 /* Delay clearing the FIFO overrun
2336 * interrupt mask until the send_BUG tx
2337 * complete has been posted.
2339 interrupt_unmask_bits &= (~0x800);
2340 printk("Jay please send bug\n");// smctr_send_bug(dev);
2343 if(tp->ptr_rx_fifo_overruns)
2344 (*tp->ptr_rx_fifo_overruns)++;
2347 err = SUCCESS;
2348 break;
2350 /* Type 0x0C - Action Command Status Interrupt
2351 * Subtype bit 14 - CB end of command chain (CE)
2352 * Subtype bit 15 - CB command interrupt (CI)
2354 case ISB_IMC_COMMAND_STATUS:
2355 err = SUCCESS;
2356 if(tp->acb_head->cmd == ACB_CMD_HIC_NOP)
2358 printk("i1\n");
2359 smctr_disable_16bit(dev);
2361 /* XXXXXXXXXXXXXXXXX */
2362 /* err = UM_Interrupt(dev); */
2364 smctr_enable_16bit(dev);
2366 else
2368 if((tp->acb_head->cmd
2369 == ACB_CMD_READ_TRC_STATUS)
2370 && (tp->acb_head->subcmd
2371 == RW_TRC_STATUS_BLOCK))
2373 if(tp->ptr_bcn_type != 0)
2375 *(tp->ptr_bcn_type)
2376 = (__u32)((SBlock *)tp->misc_command_data)->BCN_Type;
2379 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & ERROR_COUNTERS_CHANGED)
2381 smctr_update_err_stats(dev);
2384 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & TI_NDIS_RING_STATUS_CHANGED)
2386 tp->ring_status
2387 = ((SBlock*)tp->misc_command_data)->TI_NDIS_Ring_Status;
2388 smctr_disable_16bit(dev);
2389 err = smctr_ring_status_chg(dev);
2390 smctr_enable_16bit(dev);
2391 if((tp->ring_status & REMOVE_RECEIVED)
2392 && (tp->config_word0 & NO_AUTOREMOVE))
2394 smctr_issue_remove_cmd(dev);
2397 if(err != SUCCESS)
2399 tp->acb_pending
2400 = 0;
2401 break;
2405 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & UNA_CHANGED)
2407 if(tp->ptr_una)
2409 tp->ptr_una[0]
2410 = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[0]);
2411 tp->ptr_una[1]
2412 = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[1]);
2413 tp->ptr_una[2]
2414 = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[2]);
2419 if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate
2420 & READY_TO_SEND_RQ_INIT) {
2421 err = smctr_send_rq_init(dev);
2426 tp->acb_pending = 0;
2427 break;
2429 /* Type 0x0D - MAC Type 1 interrupt
2430 * Subtype -- 00 FR_BCN received at S12
2431 * 01 FR_BCN received at S21
2432 * 02 FR_DAT(DA=MA, A<>0) received at S21
2433 * 03 TSM_EXP at S21
2434 * 04 FR_REMOVE received at S42
2435 * 05 TBR_EXP, BR_FLAG_SET at S42
2436 * 06 TBT_EXP at S53
2438 case ISB_IMC_MAC_TYPE_1:
2439 if(isb_subtype > 8)
2441 err = HARDWARE_FAILED;
2442 break;
2445 err = SUCCESS;
2446 switch(isb_subtype)
2448 case 0:
2449 tp->join_state = JS_BYPASS_STATE;
2450 if(tp->status != CLOSED)
2452 tp->status = CLOSED;
2453 err = smctr_status_chg(dev);
2455 break;
2457 case 1:
2458 tp->join_state
2459 = JS_LOBE_TEST_STATE;
2460 break;
2462 case 2:
2463 tp->join_state
2464 = JS_DETECT_MONITOR_PRESENT_STATE;
2465 break;
2467 case 3:
2468 tp->join_state
2469 = JS_AWAIT_NEW_MONITOR_STATE;
2470 break;
2472 case 4:
2473 tp->join_state
2474 = JS_DUPLICATE_ADDRESS_TEST_STATE;
2475 break;
2477 case 5:
2478 tp->join_state
2479 = JS_NEIGHBOR_NOTIFICATION_STATE;
2480 break;
2482 case 6:
2483 tp->join_state
2484 = JS_REQUEST_INITIALIZATION_STATE;
2485 break;
2487 case 7:
2488 tp->join_state
2489 = JS_JOIN_COMPLETE_STATE;
2490 tp->status = OPEN;
2491 err = smctr_status_chg(dev);
2492 break;
2494 case 8:
2495 tp->join_state
2496 = JS_BYPASS_WAIT_STATE;
2497 break;
2499 break ;
2501 /* Type 0x0E - TRC Initialization Sequence Interrupt
2502 * Subtype -- 00-FF Initialization sequence complete
2504 case ISB_IMC_TRC_INTRNL_TST_STATUS:
2505 tp->status = INITIALIZED;
2506 smctr_disable_16bit(dev);
2507 err = smctr_status_chg(dev);
2508 smctr_enable_16bit(dev);
2509 break;
2511 /* other interrupt types, illegal */
2512 default:
2513 break;
2516 if(err != SUCCESS)
2517 break;
2520 /* We check the ack code instead of the unmask bits here because,
2521 * while fixing a stuck receive, DAT frames are sent and the FIFO
2522 * overrun interrupt is masked off temporarily (interrupt_unmask_bits = 0),
2523 * but we still want to issue the ack to the ISB.
2525 if(!(interrupt_ack_code & 0xff00))
2526 smctr_issue_int_ack(dev, interrupt_ack_code,
2527 interrupt_unmask_bits);
2529 smctr_disable_16bit(dev);
2530 smctr_enable_bic_int(dev);
2532 return;
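/* Illustrative sketch (not part of the original driver): the Interrupt
 * Status Block walk performed by smctr_interrupt() above, in isolation.
 * The adapter posts entries into a circular ISB; an entry is pending
 * while the high nibble of its type byte is zero, and the host marks it
 * consumed by setting that nibble.  The struct and constant below are
 * simplified stand-ins for the real ISBlock/NUM_OF_INTERRUPTS
 * definitions in smctr.h, and the dispatch step is elided.
 */
#if 0 /* example only -- kept out of the build */
#define EX_NUM_OF_INTERRUPTS 0x20

struct ex_isb_entry {
        __u8 IType;
        __u8 ISubtype;
};

static void ex_drain_isb(struct ex_isb_entry *isb, __u16 *current_index)
{
        while((isb[*current_index].IType & 0xf0) == 0)
        {
                __u16 i = *current_index;

                /* Advance and wrap the circular index first, exactly as
                 * the handler above does, so the ack code can still
                 * reference the entry that was just taken.
                 */
                (*current_index)++;
                if(*current_index == EX_NUM_OF_INTERRUPTS)
                        *current_index = 0;

                /* ... dispatch on isb[i].IType / isb[i].ISubtype ... */

                /* Mark the entry consumed so the adapter may reuse it. */
                isb[i].IType |= 0xf0;
        }
}
#endif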
2535 static int smctr_issue_enable_int_cmd(struct net_device *dev,
2536 __u16 interrupt_enable_mask)
2538 struct net_local *tp = (struct net_local *)dev->priv;
2539 int err;
2541 if((err = smctr_wait_while_cbusy(dev)))
2542 return (err);
2544 tp->sclb_ptr->int_mask_control = interrupt_enable_mask;
2545 tp->sclb_ptr->valid_command = SCLB_VALID
2546 | SCLB_CMD_CLEAR_INTERRUPT_MASK;
2548 smctr_set_ctrl_attention(dev);
2550 return (0);
2553 static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code,
2554 __u16 ibits)
2556 struct net_local *tp = (struct net_local *)dev->priv;
2558 if(smctr_wait_while_cbusy(dev))
2559 return (-1);
2561 tp->sclb_ptr->int_mask_control = ibits;
2562 tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */
tp->sclb_ptr->resume_control = 0;
2563 tp->sclb_ptr->valid_command =
2564 SCLB_VALID | SCLB_IACK_CODE_VALID
2565 | SCLB_CMD_CLEAR_INTERRUPT_MASK;
2567 smctr_set_ctrl_attention(dev);
2569 return (0);
2572 static int smctr_issue_init_timers_cmd(struct net_device *dev)
2574 struct net_local *tp = (struct net_local *)dev->priv;
2575 unsigned int i;
2576 int err;
2577 __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data;
2579 if((err = smctr_wait_while_cbusy(dev)))
2580 return (err);
2582 if((err = smctr_wait_cmd(dev)))
2583 return (err);
2585 tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
2586 tp->config_word1 = 0;
2588 if((tp->media_type == MEDIA_STP_16)
2589 || (tp->media_type == MEDIA_UTP_16)
2590 || (tp->media_type == MEDIA_STP_16_UTP_16))
2592 tp->config_word0 |= FREQ_16MB_BIT;
2595 if(tp->mode_bits & EARLY_TOKEN_REL)
2596 tp->config_word0 |= ETREN;
2598 if(tp->mode_bits & LOOPING_MODE_MASK)
2599 tp->config_word0 |= RX_OWN_BIT;
2600 else
2601 tp->config_word0 &= ~RX_OWN_BIT;
2603 if(tp->receive_mask & PROMISCUOUS_MODE)
2604 tp->config_word0 |= PROMISCUOUS_BIT;
2605 else
2606 tp->config_word0 &= ~PROMISCUOUS_BIT;
2608 if(tp->receive_mask & ACCEPT_ERR_PACKETS)
2609 tp->config_word0 |= SAVBAD_BIT;
2610 else
2611 tp->config_word0 &= ~SAVBAD_BIT;
2613 if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
2614 tp->config_word0 |= RXATMAC;
2615 else
2616 tp->config_word0 &= ~RXATMAC;
2618 if(tp->receive_mask & ACCEPT_MULTI_PROM)
2619 tp->config_word1 |= MULTICAST_ADDRESS_BIT;
2620 else
2621 tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;
2623 if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
2624 tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
2625 else
2627 if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
2628 tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
2629 else
2630 tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
2633 if((tp->media_type == MEDIA_STP_16)
2634 || (tp->media_type == MEDIA_UTP_16)
2635 || (tp->media_type == MEDIA_STP_16_UTP_16))
2637 tp->config_word1 |= INTERFRAME_SPACING_16;
2639 else
2640 tp->config_word1 |= INTERFRAME_SPACING_4;
2642 *pTimer_Struc++ = tp->config_word0;
2643 *pTimer_Struc++ = tp->config_word1;
2645 if((tp->media_type == MEDIA_STP_4)
2646 || (tp->media_type == MEDIA_UTP_4)
2647 || (tp->media_type == MEDIA_STP_4_UTP_4))
2649 *pTimer_Struc++ = 0x00FA; /* prescale */
2650 *pTimer_Struc++ = 0x2710; /* TPT_limit */
2651 *pTimer_Struc++ = 0x2710; /* TQP_limit */
2652 *pTimer_Struc++ = 0x0A28; /* TNT_limit */
2653 *pTimer_Struc++ = 0x3E80; /* TBT_limit */
2654 *pTimer_Struc++ = 0x3A98; /* TSM_limit */
2655 *pTimer_Struc++ = 0x1B58; /* TAM_limit */
2656 *pTimer_Struc++ = 0x00C8; /* TBR_limit */
2657 *pTimer_Struc++ = 0x07D0; /* TER_limit */
2658 *pTimer_Struc++ = 0x000A; /* TGT_limit */
2659 *pTimer_Struc++ = 0x1162; /* THT_limit */
2660 *pTimer_Struc++ = 0x07D0; /* TRR_limit */
2661 *pTimer_Struc++ = 0x1388; /* TVX_limit */
2662 *pTimer_Struc++ = 0x0000; /* reserved */
2664 else
2666 *pTimer_Struc++ = 0x03E8; /* prescale */
2667 *pTimer_Struc++ = 0x9C40; /* TPT_limit */
2668 *pTimer_Struc++ = 0x9C40; /* TQP_limit */
2669 *pTimer_Struc++ = 0x0A28; /* TNT_limit */
2670 *pTimer_Struc++ = 0x3E80; /* TBT_limit */
2671 *pTimer_Struc++ = 0x3A98; /* TSM_limit */
2672 *pTimer_Struc++ = 0x1B58; /* TAM_limit */
2673 *pTimer_Struc++ = 0x00C8; /* TBR_limit */
2674 *pTimer_Struc++ = 0x07D0; /* TER_limit */
2675 *pTimer_Struc++ = 0x000A; /* TGT_limit */
2676 *pTimer_Struc++ = 0x4588; /* THT_limit */
2677 *pTimer_Struc++ = 0x1F40; /* TRR_limit */
2678 *pTimer_Struc++ = 0x4E20; /* TVX_limit */
2679 *pTimer_Struc++ = 0x0000; /* reserved */
2682 /* Set node address. */
2683 *pTimer_Struc++ = dev->dev_addr[0] << 8
2684 | (dev->dev_addr[1] & 0xFF);
2685 *pTimer_Struc++ = dev->dev_addr[2] << 8
2686 | (dev->dev_addr[3] & 0xFF);
2687 *pTimer_Struc++ = dev->dev_addr[4] << 8
2688 | (dev->dev_addr[5] & 0xFF);
2690 /* Set group address. */
2691 *pTimer_Struc++ = tp->group_address_0 << 8
2692 | tp->group_address_0 >> 8;
2693 *pTimer_Struc++ = tp->group_address[0] << 8
2694 | tp->group_address[0] >> 8;
2695 *pTimer_Struc++ = tp->group_address[1] << 8
2696 | tp->group_address[1] >> 8;
2698 /* Set functional address. */
2699 *pTimer_Struc++ = tp->functional_address_0 << 8
2700 | tp->functional_address_0 >> 8;
2701 *pTimer_Struc++ = tp->functional_address[0] << 8
2702 | tp->functional_address[0] >> 8;
2703 *pTimer_Struc++ = tp->functional_address[1] << 8
2704 | tp->functional_address[1] >> 8;
2706 /* Set Bit-Wise group address. */
2707 *pTimer_Struc++ = tp->bitwise_group_address[0] << 8
2708 | tp->bitwise_group_address[0] >> 8;
2709 *pTimer_Struc++ = tp->bitwise_group_address[1] << 8
2710 | tp->bitwise_group_address[1] >> 8;
2712 /* Set ring number address. */
2713 *pTimer_Struc++ = tp->source_ring_number;
2714 *pTimer_Struc++ = tp->target_ring_number;
2716 /* Physical drop number. */
2717 *pTimer_Struc++ = (unsigned short)0;
2718 *pTimer_Struc++ = (unsigned short)0;
2720 /* Product instance ID. */
2721 for(i = 0; i < 9; i++)
2722 *pTimer_Struc++ = (unsigned short)0;
2724 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0);
2726 return (err);
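/* Illustrative sketch (not part of the original driver): how the node,
 * group and functional addresses above are packed into the 16-bit words
 * of the TRC parameter block.  Two adjacent address bytes are stored
 * big-endian in one word, and the 16-bit group/functional words are
 * byte-swapped with the usual "x << 8 | x >> 8" idiom.  The helper
 * names are hypothetical.
 */
#if 0 /* example only -- kept out of the build */
static __u16 ex_pack_addr_bytes(__u8 hi, __u8 lo)
{
        /* e.g. dev_addr[0] = 0x40, dev_addr[1] = 0x52 -> 0x4052 */
        return (__u16)((hi << 8) | (lo & 0xFF));
}

static __u16 ex_swap_addr_word(__u16 w)
{
        /* e.g. 0x1234 -> 0x3412, as done for the group address words */
        return (__u16)((w << 8) | (w >> 8));
}
#endif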
2729 static int smctr_issue_init_txrx_cmd(struct net_device *dev)
2731 struct net_local *tp = (struct net_local *)dev->priv;
2732 unsigned int i;
2733 int err;
2734 void **txrx_ptrs = (void *)tp->misc_command_data;
2736 if((err = smctr_wait_while_cbusy(dev)))
2737 return (err);
2739 if((err = smctr_wait_cmd(dev)))
2740 return (err);
2742 /* Initialize Transmit Queue Pointers that are used, to point to
2743 * a single FCB.
2745 for(i = 0; i < NUM_TX_QS_USED; i++)
2746 *txrx_ptrs++ = (void *)TRC_POINTER(tp->tx_fcb_head[i]);
2748 /* Initialize Transmit Queue Pointers that are NOT used to ZERO. */
2749 for(; i < MAX_TX_QS; i++)
2750 *txrx_ptrs++ = (void *)0;
2752 /* Initialize Receive Queue Pointers (MAC and Non-MAC) that are
2753 * used, to point to a single FCB and a BDB chain of buffers.
2755 for(i = 0; i < NUM_RX_QS_USED; i++)
2757 *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_fcb_head[i]);
2758 *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_bdb_head[i]);
2761 /* Initialize Receive Queue Pointers that are NOT used to ZERO. */
2762 for(; i < MAX_RX_QS; i++)
2764 *txrx_ptrs++ = (void *)0;
2765 *txrx_ptrs++ = (void *)0;
2768 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0);
2770 return (err);
2773 static int smctr_issue_insert_cmd(struct net_device *dev)
2775 int err;
2777 err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP);
2779 return (err);
2782 static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
2784 int err;
2786 if((err = smctr_wait_while_cbusy(dev)))
2787 return (err);
2789 if((err = smctr_wait_cmd(dev)))
2790 return (err);
2792 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS,
2793 RW_TRC_STATUS_BLOCK);
2795 return (err);
2798 static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
2800 int err;
2802 if((err = smctr_wait_while_cbusy(dev)))
2803 return (err);
2805 if((err = smctr_wait_cmd(dev)))
2806 return (err);
2808 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE,
2809 aword_cnt);
2811 return (err);
2814 static int smctr_issue_remove_cmd(struct net_device *dev)
2816 struct net_local *tp = (struct net_local *)dev->priv;
2817 int err;
2819 if((err = smctr_wait_while_cbusy(dev)))
2820 return (err);
2822 tp->sclb_ptr->resume_control = 0;
2823 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE;
2825 smctr_set_ctrl_attention(dev);
2827 return (0);
2830 static int smctr_issue_resume_acb_cmd(struct net_device *dev)
2832 struct net_local *tp = (struct net_local *)dev->priv;
2833 int err;
2835 if((err = smctr_wait_while_cbusy(dev)))
2836 return (err);
2838 tp->sclb_ptr->resume_control = SCLB_RC_ACB;
2839 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
2841 tp->acb_pending = 1;
2843 smctr_set_ctrl_attention(dev);
2845 return (0);
2848 static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
2850 struct net_local *tp = (struct net_local *)dev->priv;
2851 int err;
2853 if((err = smctr_wait_while_cbusy(dev)))
2854 return (err);
2856 if(queue == MAC_QUEUE)
2857 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB;
2858 else
2859 tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_BDB;
2861 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
2863 smctr_set_ctrl_attention(dev);
2865 return (0);
2868 static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
2870 struct net_local *tp = (struct net_local *)dev->priv;
2872 if(smctr_debug > 10)
2873 printk("%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name);
2875 if(smctr_wait_while_cbusy(dev))
2876 return (-1);
2878 if(queue == MAC_QUEUE)
2879 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB;
2880 else
2881 tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_FCB;
2883 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
2885 smctr_set_ctrl_attention(dev);
2887 return (0);
2890 static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
2892 struct net_local *tp = (struct net_local *)dev->priv;
2894 if(smctr_debug > 10)
2895 printk("%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name);
2897 if(smctr_wait_while_cbusy(dev))
2898 return (-1);
2900 tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue);
2901 tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID;
2903 smctr_set_ctrl_attention(dev);
2905 return (0);
2908 static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
2910 int err;
2912 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2913 TRC_INTERNAL_ROM_TEST);
2915 return (err);
2918 static int smctr_issue_test_hic_cmd(struct net_device *dev)
2920 int err;
2922 err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST,
2923 TRC_HOST_INTERFACE_REG_TEST);
2925 return (err);
2928 static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
2930 int err;
2932 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2933 TRC_MAC_REGISTERS_TEST);
2935 return (err);
2938 static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
2940 int err;
2942 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2943 TRC_INTERNAL_LOOPBACK);
2945 return (err);
2948 static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
2950 int err;
2952 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2953 TRC_TRI_LOOPBACK);
2955 return (err);
2958 static int smctr_issue_write_byte_cmd(struct net_device *dev,
2959 short aword_cnt, void *byte)
2961 struct net_local *tp = (struct net_local *)dev->priv;
2962 unsigned int iword, ibyte;
2963 int err;
2965 if((err = smctr_wait_while_cbusy(dev)))
2966 return (err);
2968 if((err = smctr_wait_cmd(dev)))
2969 return (err);
2971 for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff);
2972 iword++, ibyte += 2)
2974 tp->misc_command_data[iword] = (*((__u8 *)byte + ibyte) << 8)
2975 | (*((__u8 *)byte + ibyte + 1));
2978 return (smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2979 aword_cnt));
2982 static int smctr_issue_write_word_cmd(struct net_device *dev,
2983 short aword_cnt, void *word)
2985 struct net_local *tp = (struct net_local *)dev->priv;
2986 unsigned int i, err;
2988 if((err = smctr_wait_while_cbusy(dev)))
2989 return (err);
2991 if((err = smctr_wait_cmd(dev)))
2992 return (err);
2994 for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++)
2995 tp->misc_command_data[i] = *((__u16 *)word + i);
2997 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2998 aword_cnt);
3000 return (err);
3003 static int smctr_join_complete_state(struct net_device *dev)
3005 int err;
3007 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
3008 JS_JOIN_COMPLETE_STATE);
3010 return (err);
3013 static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
3015 struct net_local *tp = (struct net_local *)dev->priv;
3016 unsigned int i, j;
3017 FCBlock *fcb;
3018 BDBlock *bdb;
3020 for(i = 0; i < NUM_TX_QS_USED; i++)
3022 fcb = tp->tx_fcb_head[i];
3023 bdb = tp->tx_bdb_head[i];
3025 for(j = 0; j < tp->num_tx_fcbs[i]; j++)
3027 fcb->bdb_ptr = bdb;
3028 fcb->trc_bdb_ptr = TRC_POINTER(bdb);
3029 fcb = (FCBlock *)((char *)fcb + sizeof(FCBlock));
3030 bdb = (BDBlock *)((char *)bdb + sizeof(BDBlock));
3034 return (0);
3037 static int smctr_load_firmware(struct net_device *dev)
3039 struct net_local *tp = (struct net_local *)dev->priv;
3040 __u16 i, checksum = 0;
3041 int err = 0;
3043 if(smctr_debug > 10)
3044 printk("%s: smctr_load_firmware\n", dev->name);
3046 tp->ptr_ucode = smctr_code;
3047 tp->num_of_tx_buffs = 4;
3048 tp->mode_bits |= UMAC;
3049 tp->receive_mask = 0;
3050 tp->max_packet_size = 4177;
3052 /* Can only upload the firmware once per adapter reset. */
3053 if(tp->microcode_version != 0)
3054 return (UCODE_PRESENT);
3056 /* Verify the firmware exists and is there in the right amount. */
3057 if((tp->ptr_ucode == 0L)
3058 || (*(tp->ptr_ucode + UCODE_VERSION_OFFSET) < UCODE_VERSION))
3060 return (UCODE_NOT_PRESENT);
3063 /* UCODE_SIZE is not included in Checksum. */
3064 for(i = 0; i < *((__u16 *)(tp->ptr_ucode + UCODE_SIZE_OFFSET)); i += 2)
3065 checksum += *((__u16 *)(tp->ptr_ucode + 2 + i));
3066 if(checksum)
3067 return (UCODE_NOT_PRESENT);
3069 /* At this point we have a valid firmware image, let's kick it on up. */
3070 smctr_enable_adapter_ram(dev);
3071 smctr_enable_16bit(dev);
3072 smctr_set_page(dev, (__u8 *)tp->ram_access);
3074 if((smctr_checksum_firmware(dev))
3075 || (*(tp->ptr_ucode + UCODE_VERSION_OFFSET)
3076 > tp->microcode_version))
3078 smctr_enable_adapter_ctrl_store(dev);
3080 /* Zero out ram space for firmware. */
3081 for(i = 0; i < CS_RAM_SIZE; i += 2)
3082 *((__u16 *)(tp->ram_access + i)) = 0;
3084 smctr_decode_firmware(dev);
3086 tp->microcode_version = *(tp->ptr_ucode + UCODE_VERSION_OFFSET);
*((__u16 *)(tp->ram_access + CS_RAM_VERSION_OFFSET))
3087 = (tp->microcode_version << 8);
3088 *((__u16 *)(tp->ram_access + CS_RAM_CHECKSUM_OFFSET))
3089 = ~(tp->microcode_version << 8) + 1;
3091 smctr_disable_adapter_ctrl_store(dev);
3093 if(smctr_checksum_firmware(dev))
3094 err = HARDWARE_FAILED;
3096 else
3097 err = UCODE_PRESENT;
3099 smctr_disable_16bit(dev);
3101 return (err);
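/* Illustrative sketch (not part of the original driver): the image
 * check used by smctr_load_firmware() above reduces to a 16-bit
 * additive checksum over the body of the firmware image; the loop
 * bound comes from the image's size field and the "+ 2" skips the
 * leading field that is excluded from the checksum.  A non-zero sum
 * means the image is unusable.
 */
#if 0 /* example only -- kept out of the build */
static int ex_ucode_image_ok(__u8 *image, __u16 size_field)
{
        __u16 checksum = 0;
        __u16 i;

        for(i = 0; i < size_field; i += 2)
                checksum += *((__u16 *)(image + 2 + i));

        return (checksum == 0);
}
#endif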
3104 static int smctr_load_node_addr(struct net_device *dev)
3106 int ioaddr = dev->base_addr;
3107 unsigned int i;
3108 __u8 r;
3110 /* Check if node address has been specified by user. (non-0) */
3111 for(i = 0; ((i < 6) && (dev->dev_addr[i] == 0)); i++);
3113 if(i != 6)
3115 for(i = 0; i < 6; i++)
3117 r = inb(ioaddr + LAR0 + i);
3118 dev->dev_addr[i] = (char)r;
3120 dev->addr_len = 6;
3122 else /* Node addr. not given by user, read it from board. */
3124 for(i = 0; i < 6; i++)
3126 r = inb(ioaddr + LAR0 + i);
3127 dev->dev_addr[i] = (char)r;
3129 dev->addr_len = 6;
3133 return (0);
3136 /* Lobe Media Test.
3137 * During the transmission of the initial 1500 lobe media MAC frames,
3138 * the phase lock loop in the 805 chip may lock, and then un-lock, causing
3139 * the 825 to go into a PURGE state. When performing a PURGE, the MCT
3140 * microcode will not transmit any frames given to it by the host, and
3141 * will consequently cause a timeout.
3143 * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit
3144 * queues other than the one used for the lobe_media_test should be
3145 * disabled.
3147 * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask
3148 * has any multi-cast or promiscuous bits set, the receive_mask needs to
3149 * be changed to clear the multi-cast or promiscuous mode bits, the lobe_test
3150 * run, and then the receive mask set back to its original value if the test
3151 * is successful.
3153 static int smctr_lobe_media_test(struct net_device *dev)
3155 struct net_local *tp = (struct net_local *)dev->priv;
3156 unsigned int i, perror = 0;
3157 unsigned short saved_rcv_mask;
3159 if(smctr_debug > 10)
3160 printk("%s: smctr_lobe_media_test\n", dev->name);
3162 /* Clear receive mask for lobe test. */
3163 saved_rcv_mask = tp->receive_mask;
3164 tp->receive_mask = 0;
3166 smctr_chg_rx_mask(dev);
3168 /* Setup the lobe media test. */
3169 smctr_lobe_media_test_cmd(dev);
3170 if(smctr_wait_cmd(dev))
3172 smctr_reset_adapter(dev);
3173 tp->status = CLOSED;
3174 return (LOBE_MEDIA_TEST_FAILED);
3177 /* Tx lobe media test frames. */
3178 for(i = 0; i < 1500; ++i)
3180 if(smctr_send_lobe_media_test(dev))
3182 if(perror)
3184 smctr_reset_adapter(dev);
3185 tp->state = CLOSED;
3186 return (LOBE_MEDIA_TEST_FAILED);
3188 else
3190 perror = 1;
3191 if(smctr_lobe_media_test_cmd(dev))
3193 smctr_reset_adapter(dev);
3194 tp->state = CLOSED;
3195 return (LOBE_MEDIA_TEST_FAILED);
3201 if(smctr_send_dat(dev))
3203 if(smctr_send_dat(dev))
3205 smctr_reset_adapter(dev);
3206 tp->state = CLOSED;
3207 return (LOBE_MEDIA_TEST_FAILED);
3211 /* Check if any frames received during test. */
3212 if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status)
3213 || (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status))
3215 smctr_reset_adapter(dev);
3216 tp->state = CLOSED;
3217 return (LOBE_MEDIA_TEST_FAILED);
3220 /* Restore the original receive mask. */
3221 tp->receive_mask = saved_rcv_mask;
3223 smctr_chg_rx_mask(dev);
3225 return (0);
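/* Illustrative sketch (not part of the original driver): the
 * receive-mask handling that NOTE 2 above describes, reduced to its
 * skeleton -- save the mask, clear it (dropping any multicast or
 * promiscuous bits) for the duration of the test, then restore it once
 * the test has run.  The transmit loop itself is elided; the error
 * paths in the real routine reset the adapter instead of restoring
 * the mask.
 */
#if 0 /* example only -- kept out of the build */
static int ex_run_test_with_clean_mask(struct net_device *dev)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        unsigned short saved_rcv_mask = tp->receive_mask;

        tp->receive_mask = 0;           /* no multicast/promiscuous bits */
        smctr_chg_rx_mask(dev);

        /* ... transmit the 1500 lobe media test frames here ... */

        tp->receive_mask = saved_rcv_mask;
        smctr_chg_rx_mask(dev);

        return (0);
}
#endif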
3228 static int smctr_lobe_media_test_cmd(struct net_device *dev)
3230 struct net_local *tp = (struct net_local *)dev->priv;
3231 int err;
3233 if(smctr_debug > 10)
3234 printk("%s: smctr_lobe_media_test_cmd\n", dev->name);
3236 /* Change to lobe media test state. */
3237 if(tp->monitor_state != MS_BEACON_TEST_STATE)
3239 smctr_lobe_media_test_state(dev);
3240 if(smctr_wait_cmd(dev))
3242 printk("Lobe Failed test state\n");
3243 return (LOBE_MEDIA_TEST_FAILED);
3247 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
3248 TRC_LOBE_MEDIA_TEST);
3250 return (err);
3253 static int smctr_lobe_media_test_state(struct net_device *dev)
3255 int err;
3257 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
3258 JS_LOBE_TEST_STATE);
3260 return (err);
3263 static int smctr_make_8025_hdr(struct net_device *dev,
3264 MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc)
3266 tmf->ac = MSB(ac_fc); /* msb is access control */
3267 tmf->fc = LSB(ac_fc); /* lsb is frame control */
3269 tmf->sa[0] = dev->dev_addr[0];
3270 tmf->sa[1] = dev->dev_addr[1];
3271 tmf->sa[2] = dev->dev_addr[2];
3272 tmf->sa[3] = dev->dev_addr[3];
3273 tmf->sa[4] = dev->dev_addr[4];
3274 tmf->sa[5] = dev->dev_addr[5];
3276 switch(tmf->vc)
3278 /* Send RQ_INIT to RPS */
3279 case RQ_INIT:
3280 tmf->da[0] = 0xc0;
3281 tmf->da[1] = 0x00;
3282 tmf->da[2] = 0x00;
3283 tmf->da[3] = 0x00;
3284 tmf->da[4] = 0x00;
3285 tmf->da[5] = 0x02;
3286 break;
3288 /* Send RPT_TX_FORWARD to CRS */
3289 case RPT_TX_FORWARD:
3290 tmf->da[0] = 0xc0;
3291 tmf->da[1] = 0x00;
3292 tmf->da[2] = 0x00;
3293 tmf->da[3] = 0x00;
3294 tmf->da[4] = 0x00;
3295 tmf->da[5] = 0x10;
3296 break;
3298 /* Everything else goes to sender */
3299 default:
3300 tmf->da[0] = rmf->sa[0];
3301 tmf->da[1] = rmf->sa[1];
3302 tmf->da[2] = rmf->sa[2];
3303 tmf->da[3] = rmf->sa[3];
3304 tmf->da[4] = rmf->sa[4];
3305 tmf->da[5] = rmf->sa[5];
3306 break;
3309 return (0);
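/* Illustrative sketch (not part of the original driver): the ac_fc
 * argument above carries the 802.5 Access Control byte in its high
 * half and the Frame Control byte in its low half, so MSB()/LSB()
 * simply split one 16-bit word into the two header octets.  The sample
 * value below is made up for the example.
 */
#if 0 /* example only -- kept out of the build */
static void ex_split_ac_fc(__u16 ac_fc, __u8 *ac, __u8 *fc)
{
        *ac = (__u8)(ac_fc >> 8);       /* what MSB(ac_fc) yields */
        *fc = (__u8)(ac_fc & 0xff);     /* what LSB(ac_fc) yields */
}

/* e.g. ac_fc == 0x1040 gives ac == 0x10 and fc == 0x40 */
#endif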
3312 static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3314 struct net_local *tp = (struct net_local *)dev->priv;
3316 tsv->svi = AUTHORIZED_ACCESS_PRIORITY;
3317 tsv->svl = S_AUTHORIZED_ACCESS_PRIORITY;
3319 tsv->svv[0] = MSB(tp->authorized_access_priority);
3320 tsv->svv[1] = LSB(tp->authorized_access_priority);
3322 return (0);
3325 static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3327 tsv->svi = ADDRESS_MODIFER;
3328 tsv->svl = S_ADDRESS_MODIFER;
3330 tsv->svv[0] = 0;
3331 tsv->svv[1] = 0;
3333 return (0);
3336 static int smctr_make_auth_funct_class(struct net_device *dev,
3337 MAC_SUB_VECTOR *tsv)
3339 struct net_local *tp = (struct net_local *)dev->priv;
3341 tsv->svi = AUTHORIZED_FUNCTION_CLASS;
3342 tsv->svl = S_AUTHORIZED_FUNCTION_CLASS;
3344 tsv->svv[0] = MSB(tp->authorized_function_classes);
3345 tsv->svv[1] = LSB(tp->authorized_function_classes);
3347 return (0);
3350 static int smctr_make_corr(struct net_device *dev,
3351 MAC_SUB_VECTOR *tsv, __u16 correlator)
3353 tsv->svi = CORRELATOR;
3354 tsv->svl = S_CORRELATOR;
3356 tsv->svv[0] = MSB(correlator);
3357 tsv->svv[1] = LSB(correlator);
3359 return (0);
3362 static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3364 struct net_local *tp = (struct net_local *)dev->priv;
3366 smctr_get_functional_address(dev);
3368 tsv->svi = FUNCTIONAL_ADDRESS;
3369 tsv->svl = S_FUNCTIONAL_ADDRESS;
3371 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3372 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3374 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3375 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3377 return (0);
3380 static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3382 struct net_local *tp = (struct net_local *)dev->priv;
3384 smctr_get_group_address(dev);
3386 tsv->svi = GROUP_ADDRESS;
3387 tsv->svl = S_GROUP_ADDRESS;
3389 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3390 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3392 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3393 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3395 /* Set Group Address Sub-vector to all zeros if only the
3396 * Group Address/Functional Address Indicator is set.
3398 if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00
3399 && tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
3400 tsv->svv[0] = 0x00;
3402 return (0);
3405 static int smctr_make_phy_drop_num(struct net_device *dev,
3406 MAC_SUB_VECTOR *tsv)
3408 struct net_local *tp = (struct net_local *)dev->priv;
3410 smctr_get_physical_drop_number(dev);
3412 tsv->svi = PHYSICAL_DROP;
3413 tsv->svl = S_PHYSICAL_DROP;
3415 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3416 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3418 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3419 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3421 return (0);
3424 static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3426 int i;
3428 tsv->svi = PRODUCT_INSTANCE_ID;
3429 tsv->svl = S_PRODUCT_INSTANCE_ID;
3431 for(i = 0; i < 18; i++)
3432 tsv->svv[i] = 0xF0;
3434 return (0);
3437 static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3439 struct net_local *tp = (struct net_local *)dev->priv;
3441 smctr_get_station_id(dev);
3443 tsv->svi = STATION_IDENTIFER;
3444 tsv->svl = S_STATION_IDENTIFER;
3446 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3447 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3449 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3450 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3452 tsv->svv[4] = MSB(tp->misc_command_data[2]);
3453 tsv->svv[5] = LSB(tp->misc_command_data[2]);
3455 return (0);
3458 static int smctr_make_ring_station_status(struct net_device *dev,
3459 MAC_SUB_VECTOR * tsv)
3461 tsv->svi = RING_STATION_STATUS;
3462 tsv->svl = S_RING_STATION_STATUS;
3464 tsv->svv[0] = 0;
3465 tsv->svv[1] = 0;
3466 tsv->svv[2] = 0;
3467 tsv->svv[3] = 0;
3468 tsv->svv[4] = 0;
3469 tsv->svv[5] = 0;
3471 return (0);
3474 static int smctr_make_ring_station_version(struct net_device *dev,
3475 MAC_SUB_VECTOR *tsv)
3477 struct net_local *tp = (struct net_local *)dev->priv;
3479 tsv->svi = RING_STATION_VERSION_NUMBER;
3480 tsv->svl = S_RING_STATION_VERSION_NUMBER;
3482 tsv->svv[0] = 0xe2; /* EBCDIC - S */
3483 tsv->svv[1] = 0xd4; /* EBCDIC - M */
3484 tsv->svv[2] = 0xc3; /* EBCDIC - C */
3485 tsv->svv[3] = 0x40; /* EBCDIC - */
3486 tsv->svv[4] = 0xe5; /* EBCDIC - V */
3487 tsv->svv[5] = 0xF0 + (tp->microcode_version >> 4);
3488 tsv->svv[6] = 0xF0 + (tp->microcode_version & 0x0f);
3489 tsv->svv[7] = 0x40; /* EBCDIC - */
3490 tsv->svv[8] = 0xe7; /* EBCDIC - X */
3492 if(tp->extra_info & CHIP_REV_MASK)
3493 tsv->svv[9] = 0xc5; /* EBCDIC - E */
3494 else
3495 tsv->svv[9] = 0xc4; /* EBCDIC - D */
3497 return (0);
3500 static int smctr_make_tx_status_code(struct net_device *dev,
3501 MAC_SUB_VECTOR *tsv, __u16 tx_fstatus)
3503 tsv->svi = TRANSMIT_STATUS_CODE;
3504 tsv->svl = S_TRANSMIT_STATUS_CODE;
3506 tsv->svv[0] = ((tx_fstatus & 0x0100 >> 6) || IBM_PASS_SOURCE_ADDR);
3508 /* Stripped frame status of Transmitted Frame */
3509 tsv->svv[1] = tx_fstatus & 0xff;
3511 return (0);
3514 static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
3515 MAC_SUB_VECTOR *tsv)
3517 struct net_local *tp = (struct net_local *)dev->priv;
3519 smctr_get_upstream_neighbor_addr(dev);
3521 tsv->svi = UPSTREAM_NEIGHBOR_ADDRESS;
3522 tsv->svl = S_UPSTREAM_NEIGHBOR_ADDRESS;
3524 tsv->svv[0] = MSB(tp->misc_command_data[0]);
3525 tsv->svv[1] = LSB(tp->misc_command_data[0]);
3527 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3528 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3530 tsv->svv[4] = MSB(tp->misc_command_data[2]);
3531 tsv->svv[5] = LSB(tp->misc_command_data[2]);
3533 return (0);
3536 static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3538 tsv->svi = WRAP_DATA;
3539 tsv->svl = S_WRAP_DATA;
3541 return (0);
3545 * Open/initialize the board. This is called sometime after
3546 * booting when the 'ifconfig' program is run.
3548 * This routine should set everything up anew at each open, even
3549 * registers that "should" only need to be set once at boot, so that
3550 * there is a non-reboot way to recover if something goes wrong.
3552 static int smctr_open(struct net_device *dev)
3554 struct net_local *tp = (struct net_local *)dev->priv;
3555 int err;
3557 if(smctr_debug > 10)
3558 printk("%s: smctr_open\n", dev->name);
3560 tp->status = NOT_INITIALIZED;
3562 err = smctr_load_firmware(dev);
3563 if(err < 0)
3564 return (err);
3566 err = smctr_init_adapter(dev);
3567 if(err < 0)
3568 return (err);
3570 #ifdef MODULE
3571 MOD_INC_USE_COUNT;
3572 #endif
3574 return (err);
3577 /* Interrupt driven open of Token card. */
3578 static int smctr_open_tr(struct net_device *dev)
3580 struct net_local *tp = (struct net_local *)dev->priv;
3581 unsigned long flags;
3582 int err;
3584 if(smctr_debug > 10)
3585 printk("%s: smctr_open_tr\n", dev->name);
3587 /* Now we can actually open the adapter. */
3588 if(tp->status == OPEN)
3589 return (0);
3590 if(tp->status != INITIALIZED)
3591 return (-1);
3593 save_flags(flags);
3594 cli();
3596 smctr_set_page(dev, (__u8 *)tp->ram_access);
3598 if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)MAC_QUEUE)))
3599 return (err);
3601 if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)MAC_QUEUE)))
3602 return (err);
3604 if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)NON_MAC_QUEUE)))
3605 return (err);
3607 if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)NON_MAC_QUEUE)))
3608 return (err);
3610 tp->status = CLOSED;
3612 /* Insert into the Ring or Enter Loopback Mode. */
3613 if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_1)
3615 tp->status = CLOSED;
3617 if(!(err = smctr_issue_trc_loopback_cmd(dev)))
3619 if(!(err = smctr_wait_cmd(dev)))
3620 tp->status = OPEN;
3623 smctr_status_chg(dev);
3625 else
3627 if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_2)
3629 tp->status = CLOSED;
3630 if(!(err = smctr_issue_tri_loopback_cmd(dev)))
3632 if(!(err = smctr_wait_cmd(dev)))
3633 tp->status = OPEN;
3636 smctr_status_chg(dev);
3638 else
3640 if((tp->mode_bits & LOOPING_MODE_MASK)
3641 == LOOPBACK_MODE_3)
3643 tp->status = CLOSED;
3644 if(!(err = smctr_lobe_media_test_cmd(dev)))
3646 if(!(err = smctr_wait_cmd(dev)))
3647 tp->status = OPEN;
3649 smctr_status_chg(dev);
3651 else
3653 if(!(err = smctr_lobe_media_test(dev)))
3654 err = smctr_issue_insert_cmd(dev);
3659 restore_flags(flags);
3661 return (err);
3664 /* Check for a network adapter of this type, and return 0 if one exists.
3665 * If dev->base_addr == 0, probe all likely locations.
3666 * If dev->base_addr == 1, always return failure.
3668 int __init smctr_probe (struct net_device *dev)
3670 int i;
3671 int base_addr = dev ? dev->base_addr : 0;
3673 if(base_addr > 0x1ff) /* Check a single specified location. */
3674 return (smctr_probe1(dev, base_addr));
3675 else if(base_addr != 0) /* Don't probe at all. */
3676 return (-ENXIO);
3678 for(i = 0; smctr_portlist[i]; i++)
3680 int ioaddr = smctr_portlist[i];
3681 if(check_region(ioaddr, SMCTR_IO_EXTENT))
3682 continue;
3683 if (!smctr_probe1(dev, ioaddr))
3684 return (0);
3687 return (-ENODEV);
3690 static int __init smctr_probe1(struct net_device *dev, int ioaddr)
3692 static unsigned version_printed = 0;
3693 struct net_local *tp;
3694 int err;
3695 __u32 *ram;
3697 if(smctr_debug && version_printed++ == 0)
3698 printk("%s", version);
3700 #ifndef MODULE
3701 dev = init_trdev(dev, 0);
3702 if(dev == NULL)
3703 return (-ENOMEM);
3704 #endif
3706 /* Setup this devices private information structure */
3707 tp = (struct net_local *)kmalloc(sizeof(struct net_local),
3708 GFP_KERNEL);
3709 if(tp == NULL)
3710 return (-ENOMEM);
3711 memset(tp, 0, sizeof(struct net_local));
3712 dev->priv = tp;
3713 dev->base_addr = ioaddr;
3715 /* Actually detect an adapter now. */
3716 err = smctr_chk_isa(dev);
3717 if(err < 0)
3719 err = smctr_chk_mca(dev);
3720 if(err < 0)
3722 kfree(tp);
3723 return (-ENODEV);
3727 tp = (struct net_local *)dev->priv;
3728 dev->rmem_start = dev->mem_start = tp->ram_base;
3729 dev->rmem_end = dev->mem_end = dev->mem_start + 0x10000;
3730 ram = (__u32 *)phys_to_virt(dev->mem_start);
3731 tp->ram_access = *(__u32 *)&ram;
3733 /* Allow user to specify ring speed on module insert. */
3734 if(ringspeed == 4)
3735 tp->media_type = MEDIA_UTP_4;
3736 else
3737 tp->media_type = MEDIA_UTP_16;
3739 printk("%s: %s %s at Io %#4x, Irq %d, Rom %#4x, Ram %#4x.\n",
3740 dev->name, smctr_name, smctr_model,
3741 (unsigned int)dev->base_addr,
3742 dev->irq, tp->rom_base, tp->ram_base);
3744 dev->init = smctr_init_card;
3745 dev->open = smctr_open;
3746 dev->stop = smctr_close;
3747 dev->hard_start_xmit = smctr_send_packet;
3748 dev->tx_timeout = smctr_timeout;
3749 dev->watchdog_timeo = HZ;
3750 dev->get_stats = smctr_get_stats;
3751 dev->set_multicast_list = &smctr_set_multicast_list;
3753 return (0);
3756 static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3757 struct net_device *dev, __u16 rx_status)
3759 struct net_local *tp = (struct net_local *)dev->priv;
3760 struct sk_buff *skb;
3761 __u16 rcode, correlator;
3762 int err = 0;
3763 __u8 xframe = 1;
3764 __u16 tx_fstatus;
3766 rmf->vl = SWAP_BYTES(rmf->vl);
3767 if(rx_status & FCB_RX_STATUS_DA_MATCHED)
3769 switch(rmf->vc)
3771 /* Received MAC Frames Processed by RS. */
3772 case INIT:
3773 if((rcode = smctr_rcv_init(dev, rmf,
3774 &correlator)) == HARDWARE_FAILED)
3776 return (rcode);
3779 if((err = smctr_send_rsp(dev, rmf, rcode,
3780 correlator)))
3782 return (err);
3784 break;
3786 case CHG_PARM:
3787 if((rcode = smctr_rcv_chg_param(dev, rmf,
3788 &correlator)) ==HARDWARE_FAILED)
3790 return (rcode);
3793 if((err = smctr_send_rsp(dev, rmf, rcode,
3794 correlator)))
3796 return (err);
3798 break;
3800 case RQ_ADDR:
3801 if((rcode = smctr_rcv_rq_addr_state_attch(dev,
3802 rmf, &correlator)) != POSITIVE_ACK)
3804 if(rcode == HARDWARE_FAILED)
3805 return (rcode);
3806 else
3807 return (smctr_send_rsp(dev, rmf,
3808 rcode, correlator));
3811 if((err = smctr_send_rpt_addr(dev, rmf,
3812 correlator)))
3814 return (err);
3816 break;
3818 case RQ_ATTCH:
3819 if((rcode = smctr_rcv_rq_addr_state_attch(dev,
3820 rmf, &correlator)) != POSITIVE_ACK)
3822 if(rcode == HARDWARE_FAILED)
3823 return (rcode);
3824 else
3825 return (smctr_send_rsp(dev, rmf,
3826 rcode,
3827 correlator));
3830 if((err = smctr_send_rpt_attch(dev, rmf,
3831 correlator)))
3833 return (err);
3835 break;
3837 case RQ_STATE:
3838 if((rcode = smctr_rcv_rq_addr_state_attch(dev,
3839 rmf, &correlator)) != POSITIVE_ACK)
3841 if(rcode == HARDWARE_FAILED)
3842 return (rcode);
3843 else
3844 return (smctr_send_rsp(dev, rmf,
3845 rcode,
3846 correlator));
3849 if((err = smctr_send_rpt_state(dev, rmf,
3850 correlator)))
3852 return (err);
3854 break;
3856 case TX_FORWARD:
3857 if((rcode = smctr_rcv_tx_forward(dev, rmf))
3858 != POSITIVE_ACK)
3860 if(rcode == HARDWARE_FAILED)
3861 return (rcode);
3862 else
3863 return (smctr_send_rsp(dev, rmf,
3864 rcode,
3865 correlator));
3868 if((err = smctr_send_tx_forward(dev, rmf,
3869 &tx_fstatus)) == HARDWARE_FAILED)
3871 return (err);
3874 if(err == A_FRAME_WAS_FORWARDED)
3876 if((err = smctr_send_rpt_tx_forward(dev,
3877 rmf, tx_fstatus))
3878 == HARDWARE_FAILED)
3880 return (err);
3883 break;
3885 /* Received MAC Frames Processed by CRS/REM/RPS. */
3886 case RSP:
3887 case RQ_INIT:
3888 case RPT_NEW_MON:
3889 case RPT_SUA_CHG:
3890 case RPT_ACTIVE_ERR:
3891 case RPT_NN_INCMP:
3892 case RPT_ERROR:
3893 case RPT_ATTCH:
3894 case RPT_STATE:
3895 case RPT_ADDR:
3896 break;
3898 /* Rcvd Att. MAC Frame (if RXATMAC set) or UNKNOWN */
3899 default:
3900 xframe = 0;
3901 if(!(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES))
3903 rcode = smctr_rcv_unknown(dev, rmf,
3904 &correlator);
3905 if((err = smctr_send_rsp(dev, rmf,rcode,
3906 correlator)))
3908 return (err);
3912 break;
3915 else
3917 /* 1. DA doesn't match (Promiscuous Mode).
3918 * 2. Parse for Extended MAC Frame Type.
3920 switch(rmf->vc)
3922 case RSP:
3923 case INIT:
3924 case RQ_INIT:
3925 case RQ_ADDR:
3926 case RQ_ATTCH:
3927 case RQ_STATE:
3928 case CHG_PARM:
3929 case RPT_ADDR:
3930 case RPT_ERROR:
3931 case RPT_ATTCH:
3932 case RPT_STATE:
3933 case RPT_NEW_MON:
3934 case RPT_SUA_CHG:
3935 case RPT_NN_INCMP:
3936 case RPT_ACTIVE_ERR:
3937 break;
3939 default:
3940 xframe = 0;
3941 break;
3945 /* NOTE: UNKNOWN MAC frames will NOT be passed up unless
3946 * ACCEPT_ATT_MAC_FRAMES is set.
3948 if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
3949 && (xframe == (__u8)0))
3950 || ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES)
3951 && (xframe == (__u8)1)))
3953 rmf->vl = SWAP_BYTES(rmf->vl);
3955 skb = dev_alloc_skb(size);
3956 skb->len = size;
3958 /* Slide data into a sleek skb. */
3959 skb_put(skb, skb->len);
3960 memcpy(skb->data, rmf, skb->len);
3962 /* Update Counters */
3963 tp->MacStat.rx_packets++;
3964 tp->MacStat.rx_bytes += skb->len;
3966 /* Kick the packet on up. */
3967 skb->dev = dev;
3968 skb->protocol = tr_type_trans(skb, dev);
3969 netif_rx(skb);
3970 err = 0;
3973 return (err);
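/* Illustrative sketch (not part of the original driver): the hand-off
 * at the end of smctr_process_rx_packet() above -- copy the MAC frame
 * into a freshly allocated skb, account for it, and push it up the
 * stack.  Unlike the code above, this sketch checks the allocation
 * result before touching the skb.
 */
#if 0 /* example only -- kept out of the build */
static int ex_pass_mac_frame_up(struct net_device *dev, void *frame,
        __u16 size)
{
        struct net_local *tp = (struct net_local *)dev->priv;
        struct sk_buff *skb;

        skb = dev_alloc_skb(size);
        if(skb == NULL)
                return (-ENOMEM);

        /* skb_put() opens up 'size' bytes of data space; copy the frame in. */
        memcpy(skb_put(skb, size), frame, size);

        tp->MacStat.rx_packets++;
        tp->MacStat.rx_bytes += size;

        skb->dev = dev;
        skb->protocol = tr_type_trans(skb, dev);
        netif_rx(skb);

        return (0);
}
#endif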
3976 /* Test for RAM in adapter RAM space. */
3977 static int smctr_ram_conflict_test(struct net_device *dev)
3979 struct net_local *tp = (struct net_local *)dev->priv;
3980 unsigned int i;
3981 __u16 sword;
3983 for(i = 0; i < (unsigned int)(tp->ram_usable * 1024); i += 1024)
3985 sword = *(__u16 *)(tp->ram_access + i);
3986 *(__u16 *)(tp->ram_access + i) = 0x1234;
3987 if(*(__u16 *)(tp->ram_access + i) == 0x1234)
3989 *(__u16 *)(tp->ram_access + i) = sword;
3990 return (-1);
3994 return (0);
3997 /* Adapter RAM test. Incremental word ODD boundary data test. */
3998 static int smctr_ram_memory_test(struct net_device *dev)
4000 struct net_local *tp = (struct net_local *)dev->priv;
4001 __u16 page, pages_of_ram, start_pattern = 0, word_pattern = 0,
4002 word_read = 0, err_word = 0, err_pattern = 0;
4003 unsigned int err_offset;
4004 __u32 j, pword;
4005 __u8 err = 0;
4007 if(smctr_debug > 10)
4008 printk("%s: smctr_ram_memory_test\n", dev->name);
4010 start_pattern = 0x0001;
4011 pages_of_ram = tp->ram_size / tp->ram_usable;
4012 pword = tp->ram_access;
4014 /* Incremental word ODD boundary test. */
4015 for(page = 0; (page < pages_of_ram) && (~err);
4016 page++, start_pattern += 0x8000)
4018 smctr_set_page(dev, (__u8 *)(tp->ram_access
4019 + (page * tp->ram_usable * 1024) + 1));
4020 word_pattern = start_pattern;
4022 for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1; j += 2)
4023 *(__u16 *)(pword + j) = word_pattern++;
4025 word_pattern = start_pattern;
4027 for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1
4028 && (~err); j += 2, word_pattern++)
4030 word_read = *(__u16 *)(pword + j);
4031 if(word_read != word_pattern)
4033 err = (__u8)1;
4034 err_offset = j;
4035 err_word = word_read;
4036 err_pattern = word_pattern;
4037 return (-1);
4042 /* Zero out memory. */
4043 for(page = 0; page < pages_of_ram && (~err); page++)
4045 smctr_set_page(dev, (__u8 *)(tp->ram_access
4046 + (page * tp->ram_usable * 1024)));
4047 word_pattern = 0;
4049 for(j = 0; j < (__u32)tp->ram_usable * 1024; j +=2)
4050 *(__u16 *)(pword + j) = word_pattern;
4052 for(j =0; j < (__u32)tp->ram_usable * 1024
4053 && (~err); j += 2)
4055 word_read = *(__u16 *)(pword + j);
4056 if(word_read != word_pattern)
4058 err = (__u8)1;
4059 err_offset = j;
4060 err_word = word_read;
4061 err_pattern = word_pattern;
4062 return (-1);
4067 smctr_set_page(dev, (__u8 *)tp->ram_access);
4069 return (0);
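/* Illustrative sketch (not part of the original driver): the core of
 * the incremental-word test above applied to a single page.  An
 * incrementing 16-bit pattern is written at odd byte offsets and read
 * back; the first mismatch fails the page.  "base" stands in for the
 * mapped page address (tp->ram_access after smctr_set_page()).
 */
#if 0 /* example only -- kept out of the build */
static int ex_ram_page_test(__u32 base, __u32 page_bytes,
        __u16 start_pattern)
{
        __u16 word_pattern = start_pattern;
        __u32 j;

        for(j = 1; j < page_bytes - 1; j += 2)
                *(__u16 *)(base + j) = word_pattern++;

        word_pattern = start_pattern;
        for(j = 1; j < page_bytes - 1; j += 2, word_pattern++)
        {
                if(*(__u16 *)(base + j) != word_pattern)
                        return (-1);    /* mismatch at byte offset j */
        }

        return (0);
}
#endif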
4072 static unsigned int __init smctr_read_584_chksum(int ioaddr)
4074 __u8 pg_no, r1, r2;
4075 __u16 byte_no, csum_val = 0;
4077 for(pg_no = 0; pg_no < 16; pg_no++)
4079 r1 = inb(ioaddr + 0x01);
4080 r1 &= 0x04;
4081 r1 |= 0x02;
4083 outb(r1, ioaddr + 0x01);
4085 r1 = inb(ioaddr + 0x03);
4086 r1 &= 0x0f;
4087 r1 |= (pg_no << 4);
4089 outb(r1, ioaddr + 0x03);
4091 r1 = inb(ioaddr + 0x01);
4092 r1 &= 0x04;
4093 r1 |= 0x12;
4095 outb(r1, ioaddr + 0x01);
4097 do {
4098 r1 = inb(ioaddr + 0x01);
4099 } while(r1 & 0x10);
4101 r2 = 0;
4102 for(byte_no = 0x08; byte_no < 0x10; byte_no++)
4104 r1 = inb(ioaddr + byte_no);
4105 r2 += r1;
4108 csum_val += r2;
4111 r1 = inb(ioaddr + 0x01);
4112 r1 &= 0x04;
4113 r1 |= 0x10;
4114 outb(r1, ioaddr + 0x01);
4116 csum_val &= 0xff;
4117 if(csum_val == 0xff)
4118 return (0);
4119 else
4120 return (-1);
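/* Illustrative sketch (not part of the original driver): the pass/fail
 * rule applied by smctr_read_584_chksum() above.  The bytes recovered
 * from the 16 EEPROM pages are summed, and the low byte of that sum
 * must come out as 0xFF for the contents to be accepted.
 */
#if 0 /* example only -- kept out of the build */
static int ex_584_checksum_ok(__u8 *bytes, int count)
{
        __u16 sum = 0;
        int i;

        for(i = 0; i < count; i++)
                sum += bytes[i];

        return ((sum & 0xff) == 0xff);  /* 0xff => contents accepted */
}
#endif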
4123 static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
4124 __u16 *correlator)
4126 MAC_SUB_VECTOR *rsv;
4127 signed short vlen;
4128 __u16 rcode = POSITIVE_ACK;
4129 unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
4131 /* This Frame can only come from a CRS */
4132 if((rmf->dc_sc & SC_MASK) != SC_CRS)
4133 return(E_INAPPROPRIATE_SOURCE_CLASS);
4135 /* Remove MVID Length from total length. */
4136 vlen = (signed short)rmf->vl - 4;
4138 /* Point to First SVID */
4139 rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
4141 /* Search for Appropriate SVID's. */
4142 while((vlen > 0) && (rcode == POSITIVE_ACK))
4144 switch(rsv->svi)
4146 case CORRELATOR:
4147 svectors |= F_CORRELATOR;
4148 rcode = smctr_set_corr(dev, rsv, correlator);
4149 break;
4151 case LOCAL_RING_NUMBER:
4152 svectors |= F_LOCAL_RING_NUMBER;
4153 rcode = smctr_set_local_ring_num(dev, rsv);
4154 break;
4156 case ASSIGN_PHYSICAL_DROP:
4157 svectors |= F_ASSIGN_PHYSICAL_DROP;
4158 rcode = smctr_set_phy_drop(dev, rsv);
4159 break;
4161 case ERROR_TIMER_VALUE:
4162 svectors |= F_ERROR_TIMER_VALUE;
4163 rcode = smctr_set_error_timer_value(dev, rsv);
4164 break;
4166 case AUTHORIZED_FUNCTION_CLASS:
4167 svectors |= F_AUTHORIZED_FUNCTION_CLASS;
4168 rcode = smctr_set_auth_funct_class(dev, rsv);
4169 break;
4171 case AUTHORIZED_ACCESS_PRIORITY:
4172 svectors |= F_AUTHORIZED_ACCESS_PRIORITY;
4173 rcode = smctr_set_auth_access_pri(dev, rsv);
4174 break;
4176 default:
4177 rcode = E_SUB_VECTOR_UNKNOWN;
4178 break;
4181 /* Let the sender know if the sum of the SV lengths is
4182 * larger than the length in the MVID length field.
4184 if((vlen -= rsv->svl) < 0)
4185 rcode = E_VECTOR_LENGTH_ERROR;
4187 rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
4190 if(rcode == POSITIVE_ACK)
4192 /* Let the sender know if the MVID length field
4193 * is larger than the sum of the SV lengths.
4195 if(vlen != 0)
4196 rcode = E_VECTOR_LENGTH_ERROR;
4197 else
4199 /* Let Sender Know if Expected SVID Missing */
4200 if((svectors & R_CHG_PARM) ^ R_CHG_PARM)
4201 rcode = E_MISSING_SUB_VECTOR;
4205 return (rcode);
4208 static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
4209 __u16 *correlator)
4211 MAC_SUB_VECTOR *rsv;
4212 signed short vlen;
4213 __u16 rcode = POSITIVE_ACK;
4214 unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
4216 /* This Frame can only come from a RPS */
4217 if((rmf->dc_sc & SC_MASK) != SC_RPS)
4218 return (E_INAPPROPRIATE_SOURCE_CLASS);
4220 /* Remove MVID Length from total length. */
4221 vlen = (signed short)rmf->vl - 4;
4223 /* Point to First SVID */
4224 rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
4226 /* Search for Appropriate SVID's */
4227 while((vlen > 0) && (rcode == POSITIVE_ACK))
4229 switch(rsv->svi)
4231 case CORRELATOR:
4232 svectors |= F_CORRELATOR;
4233 rcode = smctr_set_corr(dev, rsv, correlator);
4234 break;
4236 case LOCAL_RING_NUMBER:
4237 svectors |= F_LOCAL_RING_NUMBER;
4238 rcode = smctr_set_local_ring_num(dev, rsv);
4239 break;
4241 case ASSIGN_PHYSICAL_DROP:
4242 svectors |= F_ASSIGN_PHYSICAL_DROP;
4243 rcode = smctr_set_phy_drop(dev, rsv);
4244 break;
4246 case ERROR_TIMER_VALUE:
4247 svectors |= F_ERROR_TIMER_VALUE;
4248 rcode = smctr_set_error_timer_value(dev, rsv);
4249 break;
4251 default:
4252 rcode = E_SUB_VECTOR_UNKNOWN;
4253 break;
4256 /* Let the sender know if the sum of the SV lengths is
4257 * larger than the length in the MVID length field.
4259 if((vlen -= rsv->svl) < 0)
4260 rcode = E_VECTOR_LENGTH_ERROR;
4262 rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
4265 if(rcode == POSITIVE_ACK)
4267 /* Let the sender know if the MVID length field
4268 * is larger than the sum of the SV lengths.
4270 if(vlen != 0)
4271 rcode = E_VECTOR_LENGTH_ERROR;
4272 else
4274 /* Let Sender Know if Expected SV Missing */
4275 if((svectors & R_INIT) ^ R_INIT)
4276 rcode = E_MISSING_SUB_VECTOR;
4280 return (rcode);
4283 static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
4285 MAC_SUB_VECTOR *rsv;
4286 signed short vlen;
4287 __u16 rcode = POSITIVE_ACK;
4288 unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
4290 /* This Frame can only come from a CRS */
4291 if((rmf->dc_sc & SC_MASK) != SC_CRS)
4292 return (E_INAPPROPRIATE_SOURCE_CLASS);
4294 /* Remove MVID Length from total length */
4295 vlen = (signed short)rmf->vl - 4;
4297 /* Point to First SVID */
4298 rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
4300 /* Search for Appropriate SVID's */
4301 while((vlen > 0) && (rcode == POSITIVE_ACK))
4303 switch(rsv->svi)
4305 case FRAME_FORWARD:
4306 svectors |= F_FRAME_FORWARD;
4307 rcode = smctr_set_frame_forward(dev, rsv,
4308 rmf->dc_sc);
4309 break;
4311 default:
4312 rcode = E_SUB_VECTOR_UNKNOWN;
4313 break;
4316 /* Let the sender know if the sum of the SV lengths is
4317 * larger than the length in the MVID length field.
4319 if((vlen -= rsv->svl) < 0)
4320 rcode = E_VECTOR_LENGTH_ERROR;
4322 rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
4325 if(rcode == POSITIVE_ACK)
4327 /* Let the sender know if the MVID length field
4328 * is larger than the sum of the SV lengths.
4330 if(vlen != 0)
4331 rcode = E_VECTOR_LENGTH_ERROR;
4332 else
4334 /* Let Sender Know if Expected SV Missing */
4335 if((svectors & R_TX_FORWARD) ^ R_TX_FORWARD)
4336 rcode = E_MISSING_SUB_VECTOR;
4340 return (rcode);
4343 static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
4344 MAC_HEADER *rmf, __u16 *correlator)
4346 MAC_SUB_VECTOR *rsv;
4347 signed short vlen;
4348 __u16 rcode = POSITIVE_ACK;
4349 unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
4351 /* Remove MVID Length from total length */
4352 vlen = (signed short)rmf->vl - 4;
4354 /* Point to First SVID */
4355 rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
4357 /* Search for Appropriate SVID's */
4358 while((vlen > 0) && (rcode == POSITIVE_ACK))
4360 switch(rsv->svi)
4362 case CORRELATOR:
4363 svectors |= F_CORRELATOR;
4364 rcode = smctr_set_corr(dev, rsv, correlator);
4365 break;
4367 default:
4368 rcode = E_SUB_VECTOR_UNKNOWN;
4369 break;
4372 /* Let the sender know if the sum of the SV lengths is
4373 * larger than the length in the MVID length field.
4375 if((vlen -= rsv->svl) < 0)
4376 rcode = E_VECTOR_LENGTH_ERROR;
4378 rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
4381 if(rcode == POSITIVE_ACK)
4383 /* Let the sender know if the MVID length field
4384 * is larger than the sum of the SV lengths.
4386 if(vlen != 0)
4387 rcode = E_VECTOR_LENGTH_ERROR;
4388 else
4390 /* Let Sender Know if Expected SVID Missing */
4391 if((svectors & R_RQ_ATTCH_STATE_ADDR)
4392 ^ R_RQ_ATTCH_STATE_ADDR)
4393 rcode = E_MISSING_SUB_VECTOR;
4397 return (rcode);
4400 static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
4401 __u16 *correlator)
4403 MAC_SUB_VECTOR *rsv;
4404 signed short vlen;
4406 *correlator = 0;
4408 /* Remove MVID Length from total length */
4409 vlen = (signed short)rmf->vl - 4;
4411 /* Point to First SVID */
4412 rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
4414 /* Search for CORRELATOR for RSP to UNKNOWN */
4415 while((vlen > 0) && (*correlator == 0))
4417 switch(rsv->svi)
4419 case CORRELATOR:
4420 smctr_set_corr(dev, rsv, correlator);
4421 break;
4423 default:
4424 break;
4427 vlen -= rsv->svl;
4428 rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
4431 return (E_UNRECOGNIZED_VECTOR_ID);
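/* Illustrative sketch (not part of the original driver): the
 * sub-vector walk shared by the smctr_rcv_* parsers above.  The major
 * vector length (vl) includes its own 4-byte header, so the walk has
 * vl - 4 bytes of sub-vectors to consume and must consume them
 * exactly; over- or under-running that length is a vector length
 * error.  The per-SVID handler here is a hypothetical placeholder for
 * the switch statements used above.
 */
#if 0 /* example only -- kept out of the build */
static __u16 ex_walk_sub_vectors(MAC_HEADER *rmf,
        __u16 (*handle)(MAC_SUB_VECTOR *rsv))
{
        MAC_SUB_VECTOR *rsv;
        signed short vlen;
        __u16 rcode = POSITIVE_ACK;

        vlen = (signed short)rmf->vl - 4;
        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));

        while((vlen > 0) && (rcode == POSITIVE_ACK))
        {
                rcode = handle(rsv);            /* per-SVID processing */

                if((vlen -= rsv->svl) < 0)      /* sub-vectors overran vl */
                        rcode = E_VECTOR_LENGTH_ERROR;

                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
        }

        if((rcode == POSITIVE_ACK) && (vlen != 0))
                rcode = E_VECTOR_LENGTH_ERROR;  /* vl larger than SV sum */

        return (rcode);
}
#endif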
4435 * Reset the 825 NIC and exit with:
4436 * 1. The NIC reset cleared (non-reset state), halted and un-initialized.
4437 * 2. TINT masked.
4438 * 3. CBUSY masked.
4439 * 4. TINT clear.
4440 * 5. CBUSY clear.
4442 static int smctr_reset_adapter(struct net_device *dev)
4444 struct net_local *tp = (struct net_local *)dev->priv;
4445 int ioaddr = dev->base_addr;
4447 /* Resetting the NIC will put it in a halted and un-initialized state. */
smctr_set_trc_reset(ioaddr);
4448 udelay(200000); /* ~200 ms */
4450 smctr_clear_trc_reset(ioaddr);
4451 udelay(200000); /* ~200 ms */
4453 /* Remove any latched interrupts that occurred prior to resetting the
4454 * adapter or possibly caused by line glitches due to the reset.
4456 outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR);
4458 return (0);
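/* Resume transmission on a queue that still has FCBs in use but is
 * currently idle.
 */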
4461 static int smctr_restart_tx_chain(struct net_device *dev, short queue)
4463 struct net_local *tp = (struct net_local *)dev->priv;
4464 int err = 0;
4466 if(smctr_debug > 10)
4467 printk("%s: smctr_restart_tx_chain\n", dev->name);
4469 if(tp->num_tx_fcbs_used[queue] != 0
4470 && tp->tx_queue_status[queue] == NOT_TRANSMITING)
4472 tp->tx_queue_status[queue] = TRANSMITING;
4473 err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
4476 return (err);
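/* Handle ring-status-change events: track monitor state transitions
 * (switching the ring speed if the monitor FSM went inactive) and log
 * and accumulate the individual ring status bits.
 */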
4479 static int smctr_ring_status_chg(struct net_device *dev)
4481 struct net_local *tp = (struct net_local *)dev->priv;
4483 if(smctr_debug > 10)
4484 printk("%s: smctr_ring_status_chg\n", dev->name);
4486 /* Check ring_status_flags: whenever the MONITOR_STATE_CHANGED
4487 * bit is set, check the value of monitor_state and enable the
4488 * transmit/receive timeout only if the adapter is in
4489 * MS_ACTIVE_MONITOR_STATE or MS_STANDBY_MONITOR_STATE.
4491 if(tp->ring_status_flags == MONITOR_STATE_CHANGED)
4493 if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE)
4494 || (tp->monitor_state == MS_STANDBY_MONITOR_STATE))
4496 tp->monitor_state_ready = 1;
4498 else
4500 /* if adapter is NOT in either active monitor
4501 * or standby monitor state => Disable
4502 * transmit/receive timeout.
4504 tp->monitor_state_ready = 0;
4506 /* Ring speed problem, switching to auto mode. */
4507 if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE
4508 && !tp->cleanup)
4510 printk(KERN_INFO "%s: Incorrect ring speed, switching.\n",
4511 dev->name);
4512 smctr_set_ring_speed(dev);
4517 if(!(tp->ring_status_flags & RING_STATUS_CHANGED))
4518 return (0);
4520 switch(tp->ring_status)
4522 case RING_RECOVERY:
4523 printk(KERN_INFO "%s: Ring Recovery\n", dev->name);
4524 tp->current_ring_status |= RING_RECOVERY;
4525 break;
4527 case SINGLE_STATION:
4528 printk(KERN_INFO "%s: Single Station\n", dev->name);
4529 tp->current_ring_status |= SINGLE_STATION;
4530 break;
4532 case COUNTER_OVERFLOW:
4533 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
4534 tp->current_ring_status |= COUNTER_OVERFLOW;
4535 break;
4537 case REMOVE_RECEIVED:
4538 printk(KERN_INFO "%s: Remove Received\n", dev->name);
4539 tp->current_ring_status |= REMOVE_RECEIVED;
4540 break;
4542 case AUTO_REMOVAL_ERROR:
4543 printk(KERN_INFO "%s: Auto Remove Error\n", dev->name);
4544 tp->current_ring_status |= AUTO_REMOVAL_ERROR;
4545 break;
4547 case LOBE_WIRE_FAULT:
4548 printk(KERN_INFO "%s: Lobe Wire Fault\n", dev->name);
4549 tp->current_ring_status |= LOBE_WIRE_FAULT;
4550 break;
4552 case TRANSMIT_BEACON:
4553 printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);
4554 tp->current_ring_status |= TRANSMIT_BEACON;
4555 break;
4557 case SOFT_ERROR:
4558 printk(KERN_INFO "%s: Soft Error\n", dev->name);
4559 tp->current_ring_status |= SOFT_ERROR;
4560 break;
4562 case HARD_ERROR:
4563 printk(KERN_INFO "%s: Hard Error\n", dev->name);
4564 tp->current_ring_status |= HARD_ERROR;
4565 break;
4567 case SIGNAL_LOSS:
4568 printk(KERN_INFO "%s: Signal Loss\n", dev->name);
4569 tp->current_ring_status |= SIGNAL_LOSS;
4570 break;
4572 default:
4573 printk(KERN_INFO "%s: Unknown ring status change\n",
4574 dev->name);
4575 break;
4578 return (0);
4581 /* Test for ROM signature within adapter RAM space. */
4582 static int smctr_rom_conflict_test(struct net_device *dev)
4584 struct net_local *tp = (struct net_local *)dev->priv;
4585 unsigned int i;
4587 for(i = 0; i < (unsigned int)tp->ram_usable * 1024; i += 4096)
4589 if(*(__u16 *)(tp->ram_access + i) == 0xaa55)
4590 return (-1);
4593 return (0);
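/* Service the receive FCB chain for the current receive queue: copy each
 * completed frame out of adapter shared RAM, handing NON_MAC frames to the
 * stack via netif_rx() and MAC frames to smctr_process_rx_packet().
 */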
4596 static int smctr_rx_frame(struct net_device *dev)
4598 struct net_local *tp = (struct net_local *)dev->priv;
4599 __u16 queue, status, rx_size, err = 0;
4600 __u8 *pbuff;
4602 if(smctr_debug > 10)
4603 printk("%s: smctr_rx_frame\n", dev->name);
4605 cli();
4606 queue = tp->receive_queue_number;
4608 while((status = tp->rx_fcb_curr[queue]->frame_status) != SUCCESS)
4610 err = HARDWARE_FAILED;
4612 if(((status & 0x007f) == 0)
4613 || ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0))
4615 /* frame length less the CRC (4 bytes) + FS (1 byte) */
4616 rx_size = tp->rx_fcb_curr[queue]->frame_length - 5;
4618 pbuff = smctr_get_rx_pointer(dev, queue);
4620 smctr_set_page(dev, pbuff);
4621 smctr_disable_16bit(dev);
4623 /* pbuff points to addr within one page */
4624 pbuff = (__u8 *)PAGE_POINTER(pbuff);
4626 if(queue == NON_MAC_QUEUE)
4628 struct sk_buff *skb;
4630 skb = dev_alloc_skb(rx_size);
4631 skb_put(skb, rx_size);
4633 memcpy(skb->data, pbuff, rx_size);
4634 sti();
4636 /* Update Counters */
4637 tp->MacStat.rx_packets++;
4638 tp->MacStat.rx_bytes += skb->len;
4640 /* Kick the packet on up. */
4641 skb->dev = dev;
4642 skb->protocol = tr_type_trans(skb, dev);
4643 netif_rx(skb);
4645 else
4646 smctr_process_rx_packet((MAC_HEADER *)pbuff,
4647 rx_size, dev, status);
4650 smctr_enable_16bit(dev);
4651 smctr_set_page(dev, (__u8 *)tp->ram_access);
4652 smctr_update_rx_chain(dev, queue);
4654 if(err != SUCCESS)
4655 break;
4658 return (err);
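/* Transmit a Duplicate Address Test (DAT) MAC frame addressed to this
 * station's own address and poll for its completion.
 */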
4661 static int smctr_send_dat(struct net_device *dev)
4663 struct net_local *tp = (struct net_local *)dev->priv;
4664 unsigned int i, err;
4665 MAC_HEADER *tmf;
4666 FCBlock *fcb;
4668 if(smctr_debug > 10)
4669 printk("%s: smctr_send_dat\n", dev->name);
4671 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE,
4672 sizeof(MAC_HEADER))) == (FCBlock *)(-1L))
4674 return (OUT_OF_RESOURCES);
4677 /* Initialize DAT Data Fields. */
4678 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
4679 tmf->ac = MSB(AC_FC_DAT);
4680 tmf->fc = LSB(AC_FC_DAT);
4682 for(i = 0; i < 6; i++)
4684 tmf->sa[i] = dev->dev_addr[i];
4685 tmf->da[i] = dev->dev_addr[i];
4689 tmf->vc = DAT;
4690 tmf->dc_sc = DC_RS | SC_RS;
4691 tmf->vl = 4;
4692 tmf->vl = SWAP_BYTES(tmf->vl);
4694 /* Start Transmit. */
4695 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
4696 return (err);
4698 /* Wait for Transmit to Complete */
4699 for(i = 0; i < 10000; i++)
4701 if(fcb->frame_status & FCB_COMMAND_DONE)
4702 break;
4703 udelay(1000);
4706 /* Check if GOOD frame Tx'ed. */
4707 if(!(fcb->frame_status & FCB_COMMAND_DONE)
4708 || fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
4710 return (INITIALIZE_FAILED);
4713 /* De-allocate Tx FCB and Frame Buffer.
4714 * The FCB must be de-allocated manually if executing with
4715 * interrupts disabled, otherwise the ISR (LM_Service_Events)
4716 * will de-allocate it when the interrupt occurs.
4718 tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
4719 smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
4721 return (0);
4724 static void smctr_timeout(struct net_device *dev)
4727 * If we get here, some higher level has decided we are broken.
4728 * There should really be a "kick me" function call instead.
4730 * Resetting the token ring adapter takes a long time so just
4731 * fake transmission time and go on trying. Our own timeout
4732 * routine is in sktr_timer_chk()
4734 dev->trans_start = jiffies;
4735 netif_wake_queue(dev);
4739 * Gets skb from system, queues it and checks if it can be sent
4741 static int smctr_send_packet(struct sk_buff *skb, struct net_device *dev)
4743 struct net_local *tp = (struct net_local *)dev->priv;
4745 if(smctr_debug > 10)
4746 printk("%s: smctr_send_packet\n", dev->name);
4749 * Block a transmit overlap
4752 netif_stop_queue(dev);
4754 if(tp->QueueSkb == 0)
4755 return (1); /* Return with tbusy set: queue full */
4757 tp->QueueSkb--;
4758 skb_queue_tail(&tp->SendSkbQueue, skb);
4759 smctr_hardware_send_packet(dev, tp);
4760 if(tp->QueueSkb > 0)
4761 netif_wake_queue(dev);
4763 return (0);
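/* Transmit a LOBE_MEDIA_TEST MAC frame (all-zero destination address)
 * carrying two wrap-data sub-vectors, then poll for its completion.
 */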
4766 static int smctr_send_lobe_media_test(struct net_device *dev)
4768 struct net_local *tp = (struct net_local *)dev->priv;
4769 MAC_SUB_VECTOR *tsv;
4770 MAC_HEADER *tmf;
4771 FCBlock *fcb;
4772 __u32 i;
4773 int err;
4775 if(smctr_debug > 15)
4776 printk("%s: smctr_send_lobe_media_test\n", dev->name);
4778 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr)
4779 + S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L))
4781 return (OUT_OF_RESOURCES);
4784 /* Initialize LOBE MEDIA TEST Data Fields. */
4785 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
4786 tmf->ac = MSB(AC_FC_LOBE_MEDIA_TEST);
4787 tmf->fc = LSB(AC_FC_LOBE_MEDIA_TEST);
4789 for(i = 0; i < 6; i++)
4791 tmf->da[i] = 0;
4792 tmf->sa[i] = dev->dev_addr[i];
4795 tmf->vc = LOBE_MEDIA_TEST;
4796 tmf->dc_sc = DC_RS | SC_RS;
4797 tmf->vl = 4;
4799 tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
4800 smctr_make_wrap_data(dev, tsv);
4801 tmf->vl += tsv->svl;
4803 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4804 smctr_make_wrap_data(dev, tsv);
4805 tmf->vl += tsv->svl;
4807 /* Start Transmit. */
4808 tmf->vl = SWAP_BYTES(tmf->vl);
4809 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
4810 return (err);
4812 /* Wait for Transmit to Complete. (10 ms). */
4813 for(i=0; i < 10000; i++)
4815 if(fcb->frame_status & FCB_COMMAND_DONE)
4816 break;
4817 udelay(1000);
4820 /* Check if GOOD frame Tx'ed */
4821 if(!(fcb->frame_status & FCB_COMMAND_DONE)
4822 || fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
4824 return (LOBE_MEDIA_TEST_FAILED);
4827 /* De-allocate Tx FCB and Frame Buffer.
4828 * The FCB must be de-allocated manually if executing with
4829 * interrupts disabled, otherwise the ISR (LM_Service_Events)
4830 * will de-allocate it when the interrupt occurs.
4832 tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
4833 smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
4835 return (0);
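/* Build and transmit a Report Address (RPT_ADDR) MAC frame containing the
 * correlator, physical drop number, upstream neighbor address, address
 * modifier, group address and functional address sub-vectors.
 */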
4838 static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
4839 __u16 correlator)
4841 MAC_HEADER *tmf;
4842 MAC_SUB_VECTOR *tsv;
4843 FCBlock *fcb;
4845 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
4846 + S_CORRELATOR + S_PHYSICAL_DROP + S_UPSTREAM_NEIGHBOR_ADDRESS
4847 + S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS))
4848 == (FCBlock *)(-1L))
4850 return (0);
4853 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
4854 tmf->vc = RPT_ADDR;
4855 tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
4856 tmf->vl = 4;
4858 smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ADDR);
4860 tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
4861 smctr_make_corr(dev, tsv, correlator);
4863 tmf->vl += tsv->svl;
4864 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4865 smctr_make_phy_drop_num(dev, tsv);
4867 tmf->vl += tsv->svl;
4868 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4869 smctr_make_upstream_neighbor_addr(dev, tsv);
4871 tmf->vl += tsv->svl;
4872 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4873 smctr_make_addr_mod(dev, tsv);
4875 tmf->vl += tsv->svl;
4876 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4877 smctr_make_group_addr(dev, tsv);
4879 tmf->vl += tsv->svl;
4880 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4881 smctr_make_funct_addr(dev, tsv);
4883 tmf->vl += tsv->svl;
4885 /* Subtract out MVID and MVL which are
4886 * included in both vl and MAC_HEADER
4888 /* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
4889 fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
4891 tmf->vl = SWAP_BYTES(tmf->vl);
4893 return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
4896 static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
4897 __u16 correlator)
4899 MAC_HEADER *tmf;
4900 MAC_SUB_VECTOR *tsv;
4901 FCBlock *fcb;
4903 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
4904 + S_CORRELATOR + S_PRODUCT_INSTANCE_ID + S_FUNCTIONAL_ADDRESS
4905 + S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY))
4906 == (FCBlock *)(-1L))
4908 return (0);
4911 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
4912 tmf->vc = RPT_ATTCH;
4913 tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
4914 tmf->vl = 4;
4916 smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ATTCH);
4918 tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
4919 smctr_make_corr(dev, tsv, correlator);
4921 tmf->vl += tsv->svl;
4922 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4923 smctr_make_product_id(dev, tsv);
4925 tmf->vl += tsv->svl;
4926 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4927 smctr_make_funct_addr(dev, tsv);
4929 tmf->vl += tsv->svl;
4930 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4931 smctr_make_auth_funct_class(dev, tsv);
4933 tmf->vl += tsv->svl;
4934 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4935 smctr_make_access_pri(dev, tsv);
4937 tmf->vl += tsv->svl;
4939 /* Subtract out MVID and MVL which are
4940 * included in both vl and MAC_HEADER
4942 /* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
4943 fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
4945 tmf->vl = SWAP_BYTES(tmf->vl);
4947 return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
4950 static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
4951 __u16 correlator)
4953 MAC_HEADER *tmf;
4954 MAC_SUB_VECTOR *tsv;
4955 FCBlock *fcb;
4957 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
4958 + S_CORRELATOR + S_RING_STATION_VERSION_NUMBER
4959 + S_RING_STATION_STATUS + S_STATION_IDENTIFER))
4960 == (FCBlock *)(-1L))
4962 return (0);
4965 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
4966 tmf->vc = RPT_STATE;
4967 tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
4968 tmf->vl = 4;
4970 smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_STATE);
4972 tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
4973 smctr_make_corr(dev, tsv, correlator);
4975 tmf->vl += tsv->svl;
4976 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4977 smctr_make_ring_station_version(dev, tsv);
4979 tmf->vl += tsv->svl;
4980 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4981 smctr_make_ring_station_status(dev, tsv);
4983 tmf->vl += tsv->svl;
4984 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
4985 smctr_make_station_id(dev, tsv);
4987 tmf->vl += tsv->svl;
4989 /* Subtract out MVID and MVL which are
4990 * included in both vl and MAC_HEADER
4992 /* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
4993 fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
4995 tmf->vl = SWAP_BYTES(tmf->vl);
4997 return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
5000 static int smctr_send_rpt_tx_forward(struct net_device *dev,
5001 MAC_HEADER *rmf, __u16 tx_fstatus)
5003 MAC_HEADER *tmf;
5004 MAC_SUB_VECTOR *tsv;
5005 FCBlock *fcb;
5007 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
5008 + S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L))
5010 return (0);
5013 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
5014 tmf->vc = RPT_TX_FORWARD;
5015 tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
5016 tmf->vl = 4;
5018 smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_TX_FORWARD);
5020 tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
5021 smctr_make_tx_status_code(dev, tsv, tx_fstatus);
5023 tmf->vl += tsv->svl;
5025 /* Subtract out MVID and MVL which are
5026 * included in both vl and MAC_HEADER
5028 /* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
5029 fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
5031 tmf->vl = SWAP_BYTES(tmf->vl);
5033 return(smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
5036 static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
5037 __u16 rcode, __u16 correlator)
5039 MAC_HEADER *tmf;
5040 MAC_SUB_VECTOR *tsv;
5041 FCBlock *fcb;
5043 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
5044 + S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L))
5046 return (0);
5049 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
5050 tmf->vc = RSP;
5051 tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
5052 tmf->vl = 4;
5054 smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RSP);
5056 tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
5057 smctr_make_corr(dev, tsv, correlator);
5059 return (0);
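/* Transmit a Request Initialization (RQ_INIT) MAC frame to the Ring
 * Parameter Server, retrying up to four times or until the AC bits show
 * the frame was recognized and copied, then enter the join-complete state.
 */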
5062 static int smctr_send_rq_init(struct net_device *dev)
5064 struct net_local *tp = (struct net_local *)dev->priv;
5065 MAC_HEADER *tmf;
5066 MAC_SUB_VECTOR *tsv;
5067 FCBlock *fcb;
5068 unsigned int i, count = 0;
5069 __u16 fstatus;
5070 int err;
5072 do {
5073 if(((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
5074 + S_PRODUCT_INSTANCE_ID + S_UPSTREAM_NEIGHBOR_ADDRESS
5075 + S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER))
5076 == (FCBlock *)(-1L)))
5078 return (0);
5081 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
5082 tmf->vc = RQ_INIT;
5083 tmf->dc_sc = DC_RPS | SC_RS;
5084 tmf->vl = 4;
5086 smctr_make_8025_hdr(dev, 0L, tmf, AC_FC_RQ_INIT);
5088 tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
5089 smctr_make_product_id(dev, tsv);
5091 tmf->vl += tsv->svl;
5092 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
5093 smctr_make_upstream_neighbor_addr(dev, tsv);
5095 tmf->vl += tsv->svl;
5096 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
5097 smctr_make_ring_station_version(dev, tsv);
5099 tmf->vl += tsv->svl;
5100 tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
5101 smctr_make_addr_mod(dev, tsv);
5103 tmf->vl += tsv->svl;
5105 /* Subtract out MVID and MVL which are
5106 * included in both vl and MAC_HEADER
5108 /* fcb->frame_length = tmf->vl + sizeof(MAC_HEADER) - 4;
5109 fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
5111 tmf->vl = SWAP_BYTES(tmf->vl);
5113 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
5114 return (err);
5116 /* Wait for Transmit to Complete */
5117 for(i = 0; i < 10000; i++)
5119 if(fcb->frame_status & FCB_COMMAND_DONE)
5120 break;
5121 udelay(1000);
5124 /* Check if GOOD frame Tx'ed */
5125 fstatus = fcb->frame_status;
5127 if(!(fstatus & FCB_COMMAND_DONE))
5128 return (HARDWARE_FAILED);
5130 if(!(fstatus & FCB_TX_STATUS_E))
5131 count++;
5133 /* De-allocate Tx FCB and Frame Buffer.
5134 * The FCB must be de-allocated manually if executing with
5135 * interrupts disabled, otherwise the ISR (LM_Service_Events)
5136 * will de-allocate it when the interrupt occurs.
5138 tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
5139 smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
5140 } while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS));
5142 return (smctr_join_complete_state(dev));
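/* Re-transmit the frame carried inside a received TX_FORWARD MAC frame
 * using a transmit FCB that points directly into the received buffer, and
 * report the final transmit status back to the caller.
 */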
5145 static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
5146 __u16 *tx_fstatus)
5148 struct net_local *tp = (struct net_local *)dev->priv;
5149 FCBlock *fcb;
5150 unsigned int i;
5151 int err;
5153 /* Check if this is the END POINT of the Transmit Forward Chain. */
5154 if(rmf->vl <= 18)
5155 return (0);
5157 /* Allocate Transmit FCB only by requesting 0 bytes
5158 * of data buffer.
5160 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L))
5161 return (0);
5163 /* Set pointer to Transmit Frame Buffer to the data
5164 * portion of the received TX Forward frame, making
5165 * sure to skip over the Vector Code (vc) and Vector
5166 * length (vl).
5168 fcb->bdb_ptr->trc_data_block_ptr = TRC_POINTER((__u32)rmf
5169 + sizeof(MAC_HEADER) + 2);
5170 fcb->bdb_ptr->data_block_ptr = (__u16 *)((__u32)rmf
5171 + sizeof(MAC_HEADER) + 2);
5173 fcb->frame_length = rmf->vl - 4 - 2;
5174 fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2;
5176 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
5177 return (err);
5179 /* Wait for Transmit to Complete */
5180 for(i = 0; i < 10000; i++)
5182 if(fcb->frame_status & FCB_COMMAND_DONE)
5183 break;
5184 udelay(1000);
5187 /* Check if GOOD frame Tx'ed */
5188 if(!(fcb->frame_status & FCB_COMMAND_DONE))
5190 if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE)))
5191 return (err);
5193 for(i = 0; i < 10000; i++)
5195 if(fcb->frame_status & FCB_COMMAND_DONE)
5196 break;
5197 udelay(1000);
5200 if(!(fcb->frame_status & FCB_COMMAND_DONE))
5201 return (HARDWARE_FAILED);
5204 *tx_fstatus = fcb->frame_status;
5206 return (A_FRAME_WAS_FORWARDED);
5209 static int smctr_set_auth_access_pri(struct net_device *dev,
5210 MAC_SUB_VECTOR *rsv)
5212 struct net_local *tp = (struct net_local *)dev->priv;
5214 if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY)
5215 return (E_SUB_VECTOR_LENGTH_ERROR);
5217 tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]);
5219 return (POSITIVE_ACK);
5222 static int smctr_set_auth_funct_class(struct net_device *dev,
5223 MAC_SUB_VECTOR *rsv)
5225 struct net_local *tp = (struct net_local *)dev->priv;
5227 if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS)
5228 return (E_SUB_VECTOR_LENGTH_ERROR);
5230 tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]);
5232 return (POSITIVE_ACK);
5235 static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
5236 __u16 *correlator)
5238 if(rsv->svl != S_CORRELATOR)
5239 return (E_SUB_VECTOR_LENGTH_ERROR);
5241 *correlator = (rsv->svv[0] << 8 | rsv->svv[1]);
5243 return (POSITIVE_ACK);
5246 static int smctr_set_error_timer_value(struct net_device *dev,
5247 MAC_SUB_VECTOR *rsv)
5249 __u16 err_tval;
5250 int err;
5252 if(rsv->svl != S_ERROR_TIMER_VALUE)
5253 return (E_SUB_VECTOR_LENGTH_ERROR);
5255 err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10;
5257 smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval);
5259 if((err = smctr_wait_cmd(dev)))
5260 return (err);
5262 return (POSITIVE_ACK);
5265 static int smctr_set_frame_forward(struct net_device *dev,
5266 MAC_SUB_VECTOR *rsv, __u8 dc_sc)
5268 if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD))
5269 return (E_SUB_VECTOR_LENGTH_ERROR);
5271 if((dc_sc & DC_MASK) != DC_CRS)
5273 if(rsv->svl >= 2 && rsv->svl < 20)
5274 return (E_TRANSMIT_FORWARD_INVALID);
5276 if((rsv->svv[0] != 0) || (rsv->svv[1] != 0))
5277 return (E_TRANSMIT_FORWARD_INVALID);
5280 return (POSITIVE_ACK);
5283 static int smctr_set_local_ring_num(struct net_device *dev,
5284 MAC_SUB_VECTOR *rsv)
5286 struct net_local *tp = (struct net_local *)dev->priv;
5288 if(rsv->svl != S_LOCAL_RING_NUMBER)
5289 return (E_SUB_VECTOR_LENGTH_ERROR);
5291 if(tp->ptr_local_ring_num)
5292 *(__u16 *)(tp->ptr_local_ring_num)
5293 = (rsv->svv[0] << 8 | rsv->svv[1]);
5295 return (POSITIVE_ACK);
5298 static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
5300 struct net_local *tp = (struct net_local *)dev->priv;
5301 int ioaddr = dev->base_addr;
5303 if(tp->bic_type == BIC_585_CHIP)
5304 outb((tp->trc_mask | HWR_CA), ioaddr + HWR);
5305 else
5307 outb((tp->trc_mask | CSR_CA), ioaddr + CSR);
5308 outb(tp->trc_mask, ioaddr + CSR);
5311 return (0);
5314 static void smctr_set_multicast_list(struct net_device *dev)
5316 if(smctr_debug > 10)
5317 printk("%s: smctr_set_multicast_list\n", dev->name);
5319 return;
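/* Map the shared-RAM page containing 'buf' into the host window by writing
 * the page bits of its offset from ram_access to the paging register (PR).
 */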
5322 static int smctr_set_page(struct net_device *dev, __u8 *buf)
5324 struct net_local *tp = (struct net_local *)dev->priv;
5325 __u8 amask;
5326 __u32 tptr;
5328 tptr = (__u32)buf - (__u32)tp->ram_access;
5329 amask = (__u8)((tptr & PR_PAGE_MASK) >> 8);
5330 outb(amask, dev->base_addr + PR);
5332 return (0);
5335 static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
5337 int err;
5339 if(rsv->svl != S_PHYSICAL_DROP)
5340 return (E_SUB_VECTOR_LENGTH_ERROR);
5342 smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]);
5343 if((err = smctr_wait_cmd(dev)))
5344 return (err);
5346 return (POSITIVE_ACK);
5349 /* Reset the ring speed to the opposite of what it was. This auto-pilot
5350 * mode requires a complete reset and re-init of the adapter.
5352 static int smctr_set_ring_speed(struct net_device *dev)
5354 struct net_local *tp = (struct net_local *)dev->priv;
5355 int err;
5357 if(tp->media_type == MEDIA_UTP_16)
5358 tp->media_type = MEDIA_UTP_4;
5359 else
5360 tp->media_type = MEDIA_UTP_16;
5362 smctr_enable_16bit(dev);
5364 /* Re-Initialize adapter's internal registers */
5365 smctr_reset_adapter(dev);
5367 if((err = smctr_init_card_real(dev)))
5368 return (err);
5370 smctr_enable_bic_int(dev);
5372 if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
5373 return (err);
5375 smctr_disable_16bit(dev);
5377 return (0);
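/* Check that shared-RAM accesses behave consistently on 16-bit ISA slots:
 * write a test word, switch to 8-bit mode and read it back; if the pattern
 * is lost the adapter is flagged FORCED_16BIT_MODE.
 */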
5380 static int smctr_set_rx_look_ahead(struct net_device *dev)
5382 struct net_local *tp = (struct net_local *)dev->priv;
5383 __u16 sword, rword;
5385 if(smctr_debug > 10)
5386 printk("%s: smctr_set_rx_look_ahead\n", dev->name);
5388 tp->adapter_flags &= ~(FORCED_16BIT_MODE);
5389 tp->adapter_flags |= RX_VALID_LOOKAHEAD;
5391 if(tp->adapter_bus == BUS_ISA16_TYPE)
5393 sword = *((__u16 *)(tp->ram_access));
5394 *((__u16 *)(tp->ram_access)) = 0x1234;
5396 smctr_disable_16bit(dev);
5397 rword = *((__u16 *)(tp->ram_access));
5398 smctr_enable_16bit(dev);
5400 if(rword != 0x1234)
5401 tp->adapter_flags |= FORCED_16BIT_MODE;
5403 *((__u16 *)(tp->ram_access)) = sword;
5406 return (0);
5409 static int smctr_set_trc_reset(int ioaddr)
5411 __u8 r;
5413 r = inb(ioaddr + MSR);
5414 outb(MSR_RST | r, ioaddr + MSR);
5416 return (0);
5420 * This function can be called whether or not the adapter is busy.
5422 static int smctr_setup_single_cmd(struct net_device *dev,
5423 __u16 command, __u16 subcommand)
5425 struct net_local *tp = (struct net_local *)dev->priv;
5426 unsigned int err;
5428 if(smctr_debug > 10)
5429 printk("%s: smctr_setup_single_cmd\n", dev->name);
5431 if((err = smctr_wait_while_cbusy(dev)))
5432 return (err);
5434 if((err = (unsigned int)smctr_wait_cmd(dev)))
5435 return (err);
5437 tp->acb_head->cmd_done_status = 0;
5438 tp->acb_head->cmd = command;
5439 tp->acb_head->subcmd = subcommand;
5441 err = smctr_issue_resume_acb_cmd(dev);
5443 return (err);
5447 * This function cannot be called while the adapter is busy.
5449 static int smctr_setup_single_cmd_w_data(struct net_device *dev,
5450 __u16 command, __u16 subcommand)
5452 struct net_local *tp = (struct net_local *)dev->priv;
5454 tp->acb_head->cmd_done_status = 0;
5455 tp->acb_head->cmd = command;
5456 tp->acb_head->subcmd = subcommand;
5457 tp->acb_head->data_offset_lo
5458 = (__u16)TRC_POINTER(tp->misc_command_data);
5460 return(smctr_issue_resume_acb_cmd(dev));
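/* Trivial bump allocator for adapter shared RAM: hands out the next unused
 * chunk after ram_access and advances sh_mem_used; nothing is ever freed.
 */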
5463 static char *smctr_malloc(struct net_device *dev, __u16 size)
5465 struct net_local *tp = (struct net_local *)dev->priv;
5466 char *m;
5468 m = (char *)(tp->ram_access + tp->sh_mem_used);
5469 tp->sh_mem_used += (__u32)size;
5471 return (m);
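/* Handle adapter status-change events; an INITIALIZED status completes the
 * interrupt-driven part of open() by calling smctr_open_tr().
 */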
5474 static int smctr_status_chg(struct net_device *dev)
5476 struct net_local *tp = (struct net_local *)dev->priv;
5478 if(smctr_debug > 10)
5479 printk("%s: smctr_status_chg\n", dev->name);
5481 switch(tp->status)
5483 case OPEN:
5484 break;
5486 case CLOSED:
5487 break;
5489 /* Interrupt driven open() completion. XXX */
5490 case INITIALIZED:
5491 tp->group_address_0 = 0;
5492 tp->group_address[0] = 0;
5493 tp->group_address[1] = 0;
5494 tp->functional_address_0 = 0;
5495 tp->functional_address[0] = 0;
5496 tp->functional_address[1] = 0;
5497 smctr_open_tr(dev);
5498 break;
5500 default:
5501 printk(KERN_INFO "%s: status change unknown %x\n",
5502 dev->name, tp->status);
5503 break;
5506 return (0);
5509 static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
5510 __u16 queue)
5512 struct net_local *tp = (struct net_local *)dev->priv;
5513 int err = 0;
5515 if(smctr_debug > 10)
5516 printk("%s: smctr_trc_send_packet\n", dev->name);
5518 fcb->info = FCB_CHAIN_END | FCB_ENABLE_TFS;
5519 if(tp->num_tx_fcbs[queue] != 1)
5520 fcb->back_ptr->info = FCB_INTERRUPT_ENABLE | FCB_ENABLE_TFS;
5522 if(tp->tx_queue_status[queue] == NOT_TRANSMITING)
5524 tp->tx_queue_status[queue] = TRANSMITING;
5525 err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
5528 return (err);
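/* Walk the transmit completion chain for 'queue', releasing each finished
 * FCB and, in UMAC mode, classifying its transmit status
 * (NO_SUCH_DESTINATION, DEST_OUT_OF_RESOURCES, MAX_COLLISIONS or SUCCESS).
 */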
5531 static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
5533 struct net_local *tp = (struct net_local *)dev->priv;
5534 __u16 status, err = 0;
5535 int cstatus;
5537 if(smctr_debug > 10)
5538 printk("%s: smctr_tx_complete\n", dev->name);
5540 while((status = tp->tx_fcb_end[queue]->frame_status) != SUCCESS)
5542 if(status & 0x7e00)
5544 err = HARDWARE_FAILED;
5545 break;
5548 if((err = smctr_update_tx_chain(dev, tp->tx_fcb_end[queue],
5549 queue)) != SUCCESS)
5550 break;
5552 smctr_disable_16bit(dev);
5554 if(tp->mode_bits & UMAC)
5556 if(!(status & (FCB_TX_STATUS_AR1 | FCB_TX_STATUS_AR2)))
5557 cstatus = NO_SUCH_DESTINATION;
5558 else
5560 if(!(status & (FCB_TX_STATUS_CR1 | FCB_TX_STATUS_CR2)))
5561 cstatus = DEST_OUT_OF_RESOURCES;
5562 else
5564 if(status & FCB_TX_STATUS_E)
5565 cstatus = MAX_COLLISIONS;
5566 else
5567 cstatus = SUCCESS;
5571 else
5572 cstatus = SUCCESS;
5574 if(queue == BUG_QUEUE)
5575 err = SUCCESS;
5577 smctr_enable_16bit(dev);
5578 if(err != SUCCESS)
5579 break;
5582 return (err);
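/* Copy an outgoing skb into adapter shared RAM, re-selecting the RAM page
 * whenever the destination buffer crosses a page boundary.
 */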
5585 static unsigned short smctr_tx_move_frame(struct net_device *dev,
5586 struct sk_buff *skb, __u8 *pbuff, unsigned int bytes)
5588 struct net_local *tp = (struct net_local *)dev->priv;
5589 unsigned int ram_usable;
5590 __u32 flen, len, offset = 0;
5591 __u8 *frag, *page;
5593 if(smctr_debug > 10)
5594 printk("%s: smctr_tx_move_frame\n", dev->name);
5596 ram_usable = ((unsigned int)tp->ram_usable) << 10;
5597 frag = skb->data;
5598 flen = skb->len;
5600 while(flen > 0 && bytes > 0)
5602 smctr_set_page(dev, pbuff);
5604 offset = SMC_PAGE_OFFSET(pbuff);
5606 if(offset + flen > ram_usable)
5607 len = ram_usable - offset;
5608 else
5609 len = flen;
5611 if(len > bytes)
5612 len = bytes;
5614 page = (char *) (offset + tp->ram_access);
5615 memcpy(page, frag, len);
5617 flen -= len;
5618 bytes -= len;
5619 frag += len;
5620 pbuff += len;
5623 return (0);
5626 /* Update the error statistic counters for this adapter. */
5627 static int smctr_update_err_stats(struct net_device *dev)
5629 struct net_local *tp = (struct net_local *)dev->priv;
5630 struct tr_statistics *tstat = &tp->MacStat;
5632 if(tstat->internal_errors)
5633 tstat->internal_errors
5634 += *(tp->misc_command_data + 0) & 0x00ff;
5636 if(tstat->line_errors)
5637 tstat->line_errors += *(tp->misc_command_data + 0) >> 8;
5639 if(tstat->A_C_errors)
5640 tstat->A_C_errors += *(tp->misc_command_data + 1) & 0x00ff;
5642 if(tstat->burst_errors)
5643 tstat->burst_errors += *(tp->misc_command_data + 1) >> 8;
5645 if(tstat->abort_delimiters)
5646 tstat->abort_delimiters += *(tp->misc_command_data + 2) >> 8;
5648 if(tstat->recv_congest_count)
5649 tstat->recv_congest_count
5650 += *(tp->misc_command_data + 3) & 0x00ff;
5652 if(tstat->lost_frames)
5653 tstat->lost_frames
5654 += *(tp->misc_command_data + 3) >> 8;
5656 if(tstat->frequency_errors)
5657 tstat->frequency_errors += *(tp->misc_command_data + 4) & 0x00ff;
5659 if(tstat->frame_copied_errors)
5660 tstat->frame_copied_errors
5661 += *(tp->misc_command_data + 4) >> 8;
5663 if(tstat->token_errors)
5664 tstat->token_errors += *(tp->misc_command_data + 5) >> 8;
5666 return (0);
5669 static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
5671 struct net_local *tp = (struct net_local *)dev->priv;
5672 FCBlock *fcb;
5673 BDBlock *bdb;
5674 __u16 size, len;
5676 fcb = tp->rx_fcb_curr[queue];
5677 len = fcb->frame_length;
5679 fcb->frame_status = 0;
5680 fcb->info = FCB_CHAIN_END;
5681 fcb->back_ptr->info = FCB_WARNING;
5683 tp->rx_fcb_curr[queue] = tp->rx_fcb_curr[queue]->next_ptr;
5685 /* update RX BDBs */
5686 size = (len >> RX_BDB_SIZE_SHIFT);
5687 if(len & RX_DATA_BUFFER_SIZE_MASK)
5688 size += sizeof(BDBlock);
5689 size &= (~RX_BDB_SIZE_MASK);
5691 /* check if wrap around */
5692 bdb = (BDBlock *)((__u32)(tp->rx_bdb_curr[queue]) + (__u32)(size));
5693 if((__u32)bdb >= (__u32)tp->rx_bdb_end[queue])
5695 bdb = (BDBlock *)((__u32)(tp->rx_bdb_head[queue])
5696 + (__u32)(bdb) - (__u32)(tp->rx_bdb_end[queue]));
5699 bdb->back_ptr->info = BDB_CHAIN_END;
5700 tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END;
5701 tp->rx_bdb_curr[queue] = bdb;
5703 return (0);
5706 static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
5707 __u16 queue)
5709 struct net_local *tp = (struct net_local *)dev->priv;
5711 if(smctr_debug > 20)
5712 printk("smctr_update_tx_chain\n");
5714 if(tp->num_tx_fcbs_used[queue] <= 0)
5715 return (HARDWARE_FAILED);
5716 else
5718 if(tp->tx_buff_used[queue] < fcb->memory_alloc)
5720 tp->tx_buff_used[queue] = 0;
5721 return (HARDWARE_FAILED);
5724 tp->tx_buff_used[queue] -= fcb->memory_alloc;
5726 /* If all transmit buffers are cleared we need to set
5727 * tx_buff_curr[] back to tx_buff_head[]; otherwise the tx
5728 * buffer space becomes fragmented and cannot accommodate any
5729 * buffer greater than (curr - head) or (end - curr), since we
5730 * do not allow wrap-around allocation.
5732 if(tp->tx_buff_used[queue] == 0)
5733 tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];
5735 tp->num_tx_fcbs_used[queue]--;
5736 fcb->frame_status = 0;
5737 tp->tx_fcb_end[queue] = fcb->next_ptr;
5738 netif_wake_queue(dev);
5739 return (0);
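/* Busy-wait for the head ACB to report command completion; returns -1 if
 * the loop count expires or the command completed with an error status.
 */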
5743 static int smctr_wait_cmd(struct net_device *dev)
5745 struct net_local *tp = (struct net_local *)dev->priv;
5746 unsigned int loop_count = 0x20000;
5748 if(smctr_debug > 10)
5749 printk("%s: smctr_wait_cmd\n", dev->name);
5751 while(loop_count)
5753 if(tp->acb_head->cmd_done_status & ACB_COMMAND_DONE)
5754 break;
5755 loop_count--;
5758 if(loop_count == 0)
5759 return(-1);
5761 if(tp->acb_head->cmd_done_status & 0xff)
5762 return(-1);
5764 return (0);
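/* Poll the adapter's command-busy bit (HWR_CBUSY on the 585 BIC, CSR_CBUSY
 * otherwise) until it clears or the poll count expires.
 */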
5767 static int smctr_wait_while_cbusy(struct net_device *dev)
5769 struct net_local *tp = (struct net_local *)dev->priv;
5770 unsigned int timeout = 0x20000;
5771 int ioaddr = dev->base_addr;
5772 __u8 r;
5774 if(tp->bic_type == BIC_585_CHIP)
5776 while(timeout)
5778 r = inb(ioaddr + HWR);
5779 if((r & HWR_CBUSY) == 0)
5780 break;
5781 timeout--;
5784 else
5786 while(timeout)
5788 r = inb(ioaddr + CSR);
5789 if((r & CSR_CBUSY) == 0)
5790 break;
5791 timeout--;
5795 if(timeout)
5796 return (0);
5797 else
5798 return (-1);
5801 #ifdef MODULE
5803 static struct net_device* dev_smctr[SMCTR_MAX_ADAPTERS];
5804 static int io[SMCTR_MAX_ADAPTERS];
5805 static int irq[SMCTR_MAX_ADAPTERS];
5806 static int mem[SMCTR_MAX_ADAPTERS];
5808 MODULE_PARM(io, "1-" __MODULE_STRING(SMCTR_MAX_ADAPTERS) "i");
5809 MODULE_PARM(irq, "1-" __MODULE_STRING(SMCTR_MAX_ADAPTERS) "i");
5810 MODULE_PARM(mem, "1-" __MODULE_STRING(SMCTR_MAX_ADAPTERS) "i");
5811 MODULE_PARM(ringspeed, "1-" __MODULE_STRING(SMCTR_MAX_ADAPTERS) "i");
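/* Example load command (values are illustrative only; 0x300 is one of the
 * probed I/O addresses, the IRQ and memory window depend on the card):
 *   insmod smctr.o io=0x300 irq=10 mem=0xd8000
 * Note that init_module() below clears irq[] and mem[] before they are
 * used, so only the io= parameter currently reaches the probe.
 */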
5813 int init_module(void)
5815 int i;
5817 for(i = 0; i < SMCTR_MAX_ADAPTERS; i++)
5819 irq[i] = 0;
5820 mem[i] = 0;
5821 dev_smctr[i] = NULL;
5822 dev_smctr[i] = init_trdev(dev_smctr[i], 0);
5823 if(dev_smctr[i] == NULL)
5824 return (-ENOMEM);
5826 dev_smctr[i]->base_addr = io[i];
5827 dev_smctr[i]->irq = irq[i];
5828 dev_smctr[i]->mem_start = mem[i];
5829 dev_smctr[i]->init = &smctr_probe;
5831 if(register_trdev(dev_smctr[i]) != 0)
5833 kfree(dev_smctr[i]);
5834 dev_smctr[i] = NULL;
5835 if(i == 0)
5837 printk("%s: register_trdev() returned (<0).\n",
5838 cardname);
5839 return (-EIO);
5841 else
5842 return (0);
5846 return (0);
5849 void cleanup_module(void)
5851 int i;
5853 for(i = 0; i < SMCTR_MAX_ADAPTERS; i++)
5855 if(dev_smctr[i])
5857 #ifdef CONFIG_MCA
5858 struct net_local *tp
5859 = (struct net_local *)dev_smctr[i]->priv;
5860 if(tp->slot_num)
5861 mca_mark_as_unused(tp->slot_num);
5862 #endif
5863 unregister_trdev(dev_smctr[i]);
5864 release_region(dev_smctr[i]->base_addr,
5865 SMCTR_IO_EXTENT);
5866 if(dev_smctr[i]->irq)
5867 free_irq(dev_smctr[i]->irq, dev_smctr[i]);
5868 if(dev_smctr[i]->priv)
5869 kfree(dev_smctr[i]->priv);
5870 kfree(dev_smctr[i]);
5871 dev_smctr[i] = NULL;
5875 #endif /* MODULE */