1 /**********************************************************************
2 * iph5526.c: IP/SCSI driver for the Interphase 5526 PCI Fibre Channel
3 * Card.
4 * Copyright (C) 1999 Vineet M Abraham <vma@iol.unh.edu>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *********************************************************************/
16 /**********************************************************************
17 Log:
18 Vineet M Abraham
19 02.12.99 Support multiple cards.
20 03.15.99 Added Fabric support.
21 04.04.99 Added N_Port support.
22 04.15.99 Added SCSI support.
23 06.18.99 Added ABTS Protocol.
24 06.24.99 Fixed data corruption when multiple XFER_RDYs are received.
25 07.07.99 Can be loaded as part of the Kernel. Changed semaphores. Added
26 more checks before invalidating SEST entries.
27 07.08.99 Added Broadcast IP stuff and fixed a unicast timeout bug.
28 ***********************************************************************/
29 /* TODO:
30 R_T_TOV set to 15msec in Loop topology. Needs to be 100 msec.
31 SMP testing.
32 Fix ADISC Tx before completing FLOGI.
33 */
35 static const char *version =
36 "iph5526.c:v1.0 07.08.99 Vineet Abraham (vma@iol.unh.edu)\n";
38 #include <linux/module.h>
39 #include <linux/config.h>
40 #include <linux/kernel.h>
41 #include <linux/sched.h>
42 #include <linux/errno.h>
43 #include <linux/pci.h>
44 #include <linux/init.h>
45 #include <linux/mm.h>
46 #include <linux/delay.h>
47 #include <linux/skbuff.h>
48 #include <linux/if_arp.h>
49 #include <linux/timer.h>
50 #include <linux/spinlock.h>
51 #include <asm/system.h>
52 #include <asm/io.h>
54 #include <linux/netdevice.h>
55 #include <linux/fcdevice.h> /* has the declarations for init_fcdev among others; also includes if_fcdevice.h */
57 #include <linux/blk.h>
58 #include "../../scsi/sd.h"
59 #include "../../scsi/scsi.h"
60 #include "../../scsi/hosts.h"
61 #include "../../fc4/fcp.h"
63 /* driver specific header files */
64 #include "tach.h"
65 #include "tach_structs.h"
66 #include "iph5526_ip.h"
67 #include "iph5526_scsi.h"
68 #include "iph5526_novram.c"
70 #define RUN_AT(x) (jiffies + (x))
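/* RUN_AT() turns a delay relative to now into the absolute jiffies
 * value expected in timer_list.expires; e.g. RUN_AT(5*HZ) is five
 * seconds from now. */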
72 #define DEBUG_5526_0 0
73 #define DEBUG_5526_1 0
74 #define DEBUG_5526_2 0
76 #if DEBUG_5526_0
77 #define DPRINTK(format, a...) {printk("%s: ", fi->name); \
78 printk(format, ##a); \
79 printk("\n");}
80 #define ENTER(x) {printk("%s: ", fi->name); \
81 printk("iph5526.c : entering %s()\n", x);}
82 #define LEAVE(x) {printk("%s: ", fi->name); \
83 printk("iph5526.c : leaving %s()\n",x);}
85 #else
86 #define DPRINTK(format, a...) {}
87 #define ENTER(x) {}
88 #define LEAVE(x) {}
89 #endif
91 #if DEBUG_5526_1
92 #define DPRINTK1(format, a...) {printk("%s: ", fi->name); \
93 printk(format, ##a); \
94 printk("\n");}
95 #else
96 #define DPRINTK1(format, a...) {}
97 #endif
99 #if DEBUG_5526_2
100 #define DPRINTK2(format, a...) {printk("%s: ", fi->name); \
101 printk(format, ##a); \
102 printk("\n");}
103 #else
104 #define DPRINTK2(format, a...) {}
105 #endif
107 #define T_MSG(format, a...) {printk("%s: ", fi->name); \
108 printk(format, ##a);\
109 printk("\n");}
111 #define ALIGNED_SFS_ADDR(addr) ((((unsigned long)(addr) + (SFS_BUFFER_SIZE - 1)) & ~(SFS_BUFFER_SIZE - 1)) - (unsigned long)(addr))
112 #define ALIGNED_ADDR(addr, len) ((((unsigned long)(addr) + (len - 1)) & ~(len - 1)) - (unsigned long)(addr))
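/* Both macros evaluate to the number of bytes that must be added to
 * 'addr' to reach the next 'len'-byte boundary (0 if it is already
 * aligned); 'len' is assumed to be a power of two. For example, with
 * len = 32 and an address ending in 0x04, the result is 28. */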
115 #define MAX_FC_CARDS 2
116 static struct fc_info *fc[MAX_FC_CARDS+1];
117 static unsigned int pci_irq_line = 0;
118 static struct {
119 unsigned short vendor_id;
120 unsigned short device_id;
121 char *name;
122 }
123 clone_list[] __initdata = {
124 {PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_5526, "Interphase Fibre Channel HBA"},
125 {PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_55x6, "Interphase Fibre Channel HBA"},
126 {0,}
127 };
129 static void tachyon_interrupt(int irq, void *dev_id, struct pt_regs *regs);
130 static void tachyon_interrupt_handler(int irq, void* dev_id, struct pt_regs* regs);
132 static int initialize_register_pointers(struct fc_info *fi);
133 void clean_up_memory(struct fc_info *fi);
135 static int tachyon_init(struct fc_info *fi);
136 static int build_queues(struct fc_info *fi);
137 static void build_tachyon_header(struct fc_info *fi, u_int my_id, u_int r_ctl, u_int d_id, u_int type, u_char seq_id, u_char df_ctl, u_short ox_id, u_short rx_id, char *data);
138 static int get_free_header(struct fc_info *fi);
139 static void build_EDB(struct fc_info *fi, char *data, u_short flags, u_short len);
140 static int get_free_EDB(struct fc_info *fi);
141 static void build_ODB(struct fc_info *fi, u_char seq_id, u_int d_id, u_int len, u_int cntl, u_short mtu, u_short ox_id, u_short rx_id, int NW_header, int int_required, u_int frame_class);
142 static void write_to_tachyon_registers(struct fc_info *fi);
143 static void reset_latch(struct fc_info *fi);
144 static void reset_tachyon(struct fc_info *fi, u_int value);
145 static void take_tachyon_offline(struct fc_info *fi);
146 static void read_novram(struct fc_info *fi);
147 static void reset_ichip(struct fc_info *fi);
148 static void update_OCQ_indx(struct fc_info *fi);
149 static void update_IMQ_indx(struct fc_info *fi, int count);
150 static void update_SFSBQ_indx(struct fc_info *fi);
151 static void update_MFSBQ_indx(struct fc_info *fi, int count);
152 static void update_tachyon_header_indx(struct fc_info *fi);
153 static void update_EDB_indx(struct fc_info *fi);
154 static void handle_FM_interrupt(struct fc_info *fi);
155 static void handle_MFS_interrupt(struct fc_info *fi);
156 static void handle_OOO_interrupt(struct fc_info *fi);
157 static void handle_SFS_interrupt(struct fc_info *fi);
158 static void handle_OCI_interrupt(struct fc_info *fi);
159 static void handle_SFS_BUF_WARN_interrupt(struct fc_info *fi);
160 static void handle_MFS_BUF_WARN_interrupt(struct fc_info *fi);
161 static void handle_IMQ_BUF_WARN_interrupt(struct fc_info *fi);
162 static void handle_Unknown_Frame_interrupt(struct fc_info *fi);
163 static void handle_Busied_Frame_interrupt(struct fc_info *fi);
164 static void handle_Bad_SCSI_Frame_interrupt(struct fc_info *fi);
165 static void handle_Inbound_SCSI_Status_interrupt(struct fc_info *fi);
166 static void handle_Inbound_SCSI_Command_interrupt(struct fc_info *fi);
167 static void completion_message_handler(struct fc_info *fi, u_int imq_int_type);
168 static void fill_login_frame(struct fc_info *fi, u_int logi);
170 static int tx_exchange(struct fc_info *fi, char *data, u_int len, u_int r_ctl, u_int type, u_int d_id, u_int mtu, int int_required, u_short ox_id, u_int frame_class);
171 static int tx_sequence(struct fc_info *fi, char *data, u_int len, u_int mtu, u_int d_id, u_short ox_id, u_short rx_id, u_char seq_id, int NW_flag, int int_required, u_int frame_class);
172 static int validate_login(struct fc_info *fi, u_int *base_ptr);
173 static void add_to_address_cache(struct fc_info *fi, u_int *base_ptr);
174 static void remove_from_address_cache(struct fc_info *fi, u_int *data, u_int cmnd_code);
175 static int node_logged_in_prev(struct fc_info *fi, u_int *buff_addr);
176 static int sid_logged_in(struct fc_info *fi, u_int s_id);
177 static struct fc_node_info *look_up_cache(struct fc_info *fi, char *data);
178 static int display_cache(struct fc_info *fi);
180 static void tx_logi(struct fc_info *fi, u_int logi, u_int d_id);
181 static void tx_logi_acc(struct fc_info *fi, u_int logi, u_int d_id, u_short received_ox_id);
182 static void tx_prli(struct fc_info *fi, u_int command_code, u_int d_id, u_short received_ox_id);
183 static void tx_logo(struct fc_info *fi, u_int d_id, u_short received_ox_id);
184 static void tx_adisc(struct fc_info *fi, u_int cmnd_code, u_int d_id, u_short received_ox_id);
185 static void tx_ls_rjt(struct fc_info *fi, u_int d_id, u_short received_ox_id, u_short reason_code, u_short expln_code);
186 static u_int plogi_ok(struct fc_info *fi, u_int *buff_addr, int size);
187 static void tx_acc(struct fc_info *fi, u_int d_id, u_short received_ox_id);
188 static void tx_name_server_req(struct fc_info *fi, u_int req);
189 static void rscn_handler(struct fc_info *fi, u_int node_id);
190 static void tx_scr(struct fc_info *fi);
191 static void scr_timer(unsigned long data);
192 static void explore_fabric(struct fc_info *fi, u_int *buff_addr);
193 static void perform_adisc(struct fc_info *fi);
194 static void local_port_discovery(struct fc_info *fi);
195 static void add_to_ox_id_list(struct fc_info *fi, u_int transaction_id, u_int cmnd_code);
196 static u_int remove_from_ox_id_list(struct fc_info *fi, u_short received_ox_id);
197 static void add_display_cache_timer(struct fc_info *fi);
199 /* Timers... */
200 static void nos_ols_timer(unsigned long data);
201 static void loop_timer(unsigned long data);
202 static void fabric_explore_timer(unsigned long data);
203 static void port_discovery_timer(unsigned long data);
204 static void display_cache_timer(unsigned long data);
206 /* SCSI Stuff */
207 static int add_to_sest(struct fc_info *fi, Scsi_Cmnd *Cmnd, struct fc_node_info *ni);
208 static struct fc_node_info *resolve_target(struct fc_info *fi, u_char target);
209 static void update_FCP_CMND_indx(struct fc_info *fi);
210 static int get_free_SDB(struct fc_info *fi);
211 static void update_SDB_indx(struct fc_info *fi);
212 static void mark_scsi_sid(struct fc_info *fi, u_int *buff_addr, u_char action);
213 static void invalidate_SEST_entry(struct fc_info *fi, u_short received_ox_id);
214 static int abort_exchange(struct fc_info *fi, u_short ox_id);
215 static void flush_tachyon_cache(struct fc_info *fi, u_short ox_id);
216 static int get_scsi_oxid(struct fc_info *fi);
217 static void update_scsi_oxid(struct fc_info *fi);
219 Scsi_Host_Template driver_template = IPH5526_SCSI_FC;
222 #ifdef CONFIG_PCI
223 static int iph5526_probe_pci(struct net_device *dev);
224 #endif
227 int __init iph5526_probe(struct net_device *dev)
229 #ifdef CONFIG_PCI
230 if (pci_present() && (iph5526_probe_pci(dev) == 0))
231 return 0;
232 #endif
233 return -ENODEV;
236 #ifdef CONFIG_PCI
237 static int __init iph5526_probe_pci(struct net_device *dev)
239 #ifndef MODULE
240 struct fc_info *fi;
241 static int count = 0;
242 #endif
243 #ifdef MODULE
244 struct fc_info *fi = (struct fc_info *)dev->priv;
245 #endif
247 #ifndef MODULE
248 if(fc[count] != NULL) {
249 if (dev == NULL) {
250 dev = init_fcdev(NULL, 0);
251 if (dev == NULL)
252 return -ENOMEM;
254 fi = fc[count];
255 #endif
256 fi->dev = dev;
257 dev->base_addr = fi->base_addr;
258 dev->irq = fi->irq;
259 if (dev->priv == NULL)
260 dev->priv = fi;
261 fcdev_init(dev);
262 /* Assign our MAC address. */
264 dev->dev_addr[0] = (fi->g.my_port_name_high & 0x0000FF00) >> 8;
265 dev->dev_addr[1] = fi->g.my_port_name_high;
266 dev->dev_addr[2] = (fi->g.my_port_name_low & 0xFF000000) >> 24;
267 dev->dev_addr[3] = (fi->g.my_port_name_low & 0x00FF0000) >> 16;
268 dev->dev_addr[4] = (fi->g.my_port_name_low & 0x0000FF00) >> 8;
269 dev->dev_addr[5] = fi->g.my_port_name_low;
270 #ifndef MODULE
271 count++;
273 else
274 return -ENODEV;
275 #endif
276 display_cache(fi);
277 return 0;
279 #endif /* CONFIG_PCI */
281 static int __init fcdev_init(struct net_device *dev)
283 dev->open = iph5526_open;
284 dev->stop = iph5526_close;
285 dev->hard_start_xmit = iph5526_send_packet;
286 dev->get_stats = iph5526_get_stats;
287 dev->set_multicast_list = NULL;
288 dev->change_mtu = iph5526_change_mtu;
289 #ifndef MODULE
290 fc_setup(dev);
291 #endif
292 return 0;
295 /* initialize tachyon and take it OnLine */
296 static int tachyon_init(struct fc_info *fi)
298 ENTER("tachyon_init");
299 if (build_queues(fi) == 0) {
300 T_MSG("build_queues() failed");
301 return 0;
304 /* Retrieve our port/node name. */
306 read_novram(fi);
308 reset_ichip(fi);
310 reset_tachyon(fi, SOFTWARE_RESET);
312 LEAVE("tachyon_init");
313 return 1;
316 /* Build the 4 Qs - IMQ, OCQ, MFSBQ, SFSBQ */
317 /* Lots of dma_pages needed as Tachyon DMAs almost everything into
318 * host memory. */
320 static int build_queues(struct fc_info *fi)
322 int i,j;
323 u_char *addr;
324 ENTER("build_queues");
325 /* Initializing Queue Variables. */
327 fi->q.ptr_host_ocq_cons_indx = NULL;
328 fi->q.ptr_host_hpcq_cons_indx = NULL;
329 fi->q.ptr_host_imq_prod_indx = NULL;
331 fi->q.ptr_ocq_base = NULL;
332 fi->q.ocq_len = 0;
333 fi->q.ocq_end = 0;
334 fi->q.ocq_prod_indx = 0;
336 fi->q.ptr_imq_base = NULL;
337 fi->q.imq_len = 0;
338 fi->q.imq_end = 0;
339 fi->q.imq_cons_indx = 0;
340 fi->q.imq_prod_indx = 0;
342 fi->q.ptr_mfsbq_base = NULL;
343 fi->q.mfsbq_len = 0;
344 fi->q.mfsbq_end = 0;
345 fi->q.mfsbq_prod_indx = 0;
346 fi->q.mfsbq_cons_indx = 0;
347 fi->q.mfsbuff_len = 0;
348 fi->q.mfsbuff_end = 0;
349 fi->g.mfs_buffer_count = 0;
351 fi->q.ptr_sfsbq_base = NULL;
352 fi->q.sfsbq_len = 0;
353 fi->q.sfsbq_end = 0;
354 fi->q.sfsbq_prod_indx = 0;
355 fi->q.sfsbq_cons_indx = 0;
356 fi->q.sfsbuff_len = 0;
357 fi->q.sfsbuff_end = 0;
359 fi->q.sdb_indx = 0;
360 fi->q.fcp_cmnd_indx = 0;
362 fi->q.ptr_edb_base = NULL;
363 fi->q.edb_buffer_indx = 0;
364 fi->q.ptr_tachyon_header_base = NULL;
365 fi->q.tachyon_header_indx = 0;
366 fi->node_info_list = NULL;
367 fi->ox_id_list = NULL;
368 fi->g.loop_up = FALSE;
369 fi->g.ptp_up = FALSE;
370 fi->g.link_up = FALSE;
371 fi->g.fabric_present = FALSE;
372 fi->g.n_port_try = FALSE;
373 fi->g.dont_init = FALSE;
374 fi->g.nport_timer_set = FALSE;
375 fi->g.lport_timer_set = FALSE;
376 fi->g.no_of_targets = 0;
377 fi->g.sem = 0;
378 fi->g.perform_adisc = FALSE;
379 fi->g.e_i = 0;
381 /* build OCQ */
382 if ( (fi->q.ptr_ocq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
383 T_MSG("failed to get OCQ page");
384 return 0;
386 /* set up the OCQ structures */
387 for (i = 0; i < OCQ_LENGTH; i++)
388 fi->q.ptr_odb[i] = fi->q.ptr_ocq_base + NO_OF_ENTRIES*i;
390 /* build IMQ */
391 if ( (fi->q.ptr_imq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
392 T_MSG("failed to get IMQ page");
393 return 0;
395 for (i = 0; i < IMQ_LENGTH; i++)
396 fi->q.ptr_imqe[i] = fi->q.ptr_imq_base + NO_OF_ENTRIES*i;
398 /* build MFSBQ */
399 if ( (fi->q.ptr_mfsbq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
400 T_MSG("failed to get MFSBQ page");
401 return 0;
403 memset((char *)fi->q.ptr_mfsbq_base, 0, MFSBQ_LENGTH * 32);
404 /* Allocate one huge chunk of memory... helps while reassembling
405 * frames. */
407 if ( (addr = (u_char *)__get_free_pages(GFP_KERNEL, 5) ) == 0) {
408 T_MSG("failed to get MFSBQ page");
409 return 0;
411 /* fill in addresses of empty buffers */
412 for (i = 0; i < MFSBQ_LENGTH; i++) {
413 for (j = 0; j < NO_OF_ENTRIES; j++) {
414 *(fi->q.ptr_mfsbq_base + i*NO_OF_ENTRIES + j) = htonl(virt_to_bus(addr));
415 addr += MFS_BUFFER_SIZE;
419 /* Each MFSBQ entry holds 8 buffer pointers and there are 8 MFSBQ
420 * entries, which takes up only 256 of the 4096 bytes in the page.
421 * The remaining 4096-256 bytes are used as temporary space for ELS
422 * frames; this also keeps the addresses aligned. */
424 fi->g.els_buffer[0] = fi->q.ptr_mfsbq_base + MFSBQ_LENGTH*NO_OF_ENTRIES;
425 for (i = 1; i < MAX_PENDING_FRAMES; i++)
426 fi->g.els_buffer[i] = fi->g.els_buffer[i-1] + 64;
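/* els_buffer[] holds u_int pointers, so the "+ 64" above spaces
 * consecutive ELS scratch buffers 64 words (256 bytes) apart in the
 * left-over part of the MFSBQ page. */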
428 /* build SFSBQ */
429 if ( (fi->q.ptr_sfsbq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
430 T_MSG("failed to get SFSBQ page");
431 return 0;
433 memset((char *)fi->q.ptr_sfsbq_base, 0, SFSBQ_LENGTH * 32);
434 /* fill in addresses of empty buffers */
435 for (i = 0; i < SFSBQ_LENGTH; i++)
436 for (j = 0; j < NO_OF_ENTRIES; j++){
437 addr = kmalloc(SFS_BUFFER_SIZE*2, GFP_KERNEL);
438 if (addr == NULL){
439 T_MSG("ptr_sfs_buffer : memory not allocated");
440 return 0;
442 else {
443 int offset = ALIGNED_SFS_ADDR(addr);
444 memset((char *)addr, 0, SFS_BUFFER_SIZE);
445 fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES +j] = (u_int *)addr;
446 addr += offset;
447 *(fi->q.ptr_sfsbq_base + i*NO_OF_ENTRIES + j) = htonl(virt_to_bus(addr));
451 /* Each SFSBQ entry holds 8 buffer pointers and there are 8 SFSBQ
452 * entries, which takes up only 256 of the 4096 bytes in the page.
453 * The remaining 4096-256 bytes are used as temporary space for ARP
454 * frames; this is done in order to support HW_Types of 0x1 and 0x6. */
456 fi->g.arp_buffer = (char *)fi->q.ptr_sfsbq_base + SFSBQ_LENGTH*NO_OF_ENTRIES*4;
458 /* build EDB */
459 if ((fi->q.ptr_edb_base = (u_int *)__get_free_pages(GFP_KERNEL, 5) ) == 0) {
460 T_MSG("failed to get EDB page");
461 return 0;
463 for (i = 0; i < EDB_LEN; i++)
464 fi->q.ptr_edb[i] = fi->q.ptr_edb_base + 2*i;
466 /* build SEST */
468 /* OX_IDs range from 0x0 - 0x4FFF. */
470 if ((fi->q.ptr_sest_base = (u_int *)__get_free_pages(GFP_KERNEL, 5)) == 0) {
471 T_MSG("failed to get SEST page");
472 return 0;
474 for (i = 0; i < SEST_LENGTH; i++)
475 fi->q.ptr_sest[i] = fi->q.ptr_sest_base + NO_OF_ENTRIES*i;
477 if ((fi->q.ptr_sdb_base = (u_int *)__get_free_pages(GFP_KERNEL, 5)) == 0) {
478 T_MSG("failed to get SDB page");
479 return 0;
481 for (i = 0 ; i < NO_OF_SDB_ENTRIES; i++)
482 fi->q.ptr_sdb_slot[i] = fi->q.ptr_sdb_base + (SDB_SIZE/4)*i;
484 if ((fi->q.ptr_fcp_cmnd_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
485 T_MSG("failed to get FCP_CMND page");
486 return 0;
488 for (i = 0; i < NO_OF_FCP_CMNDS; i++)
489 fi->q.ptr_fcp_cmnd[i] = fi->q.ptr_fcp_cmnd_base + NO_OF_ENTRIES*i;
491 /* Allocate space for Tachyon Header as well... */
493 if ((fi->q.ptr_tachyon_header_base = (u_int *)__get_free_pages(GFP_KERNEL, 0) ) == 0) {
494 T_MSG("failed to get tachyon_header page");
495 return 0;
497 for (i = 0; i < NO_OF_TACH_HEADERS; i++)
498 fi->q.ptr_tachyon_header[i] = fi->q.ptr_tachyon_header_base + 16*i;
500 /* Allocate memory for indices.
501 * Indices should be aligned on 32 byte boundaries. */
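/* The idiom below over-allocates (2*32 bytes), keeps the original
 * pointer so the block can be freed later, and bumps the working
 * pointer when kmalloc() does not return a 32-byte aligned block. */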
503 fi->q.host_ocq_cons_indx = kmalloc(2*32, GFP_KERNEL);
504 if (fi->q.host_ocq_cons_indx == NULL){
505 T_MSG("fi->q.host_ocq_cons_indx : memory not allocated");
506 return 0;
508 fi->q.ptr_host_ocq_cons_indx = fi->q.host_ocq_cons_indx;
509 if ((u_long)(fi->q.host_ocq_cons_indx) % 32)
510 fi->q.host_ocq_cons_indx++;
512 fi->q.host_hpcq_cons_indx = kmalloc(2*32, GFP_KERNEL);
513 if (fi->q.host_hpcq_cons_indx == NULL){
514 T_MSG("fi->q.host_hpcq_cons_indx : memory not allocated");
515 return 0;
517 fi->q.ptr_host_hpcq_cons_indx= fi->q.host_hpcq_cons_indx;
518 if ((u_long)(fi->q.host_hpcq_cons_indx) % 32)
519 fi->q.host_hpcq_cons_indx++;
521 fi->q.host_imq_prod_indx = kmalloc(2*32, GFP_KERNEL);
522 if (fi->q.host_imq_prod_indx == NULL){
523 T_MSG("fi->q.host_imq_prod_indx : memory not allocated");
524 return 0;
526 fi->q.ptr_host_imq_prod_indx = fi->q.host_imq_prod_indx;
527 if ((u_long)(fi->q.host_imq_prod_indx) % 32)
528 fi->q.host_imq_prod_indx++;
530 LEAVE("build_queues");
531 return 1;
535 static void write_to_tachyon_registers(struct fc_info *fi)
537 u_int bus_addr, bus_indx_addr, i;
539 ENTER("write_to_tachyon_registers");
541 /* Clear Queues each time Tachyon is reset */
542 memset((char *)fi->q.ptr_ocq_base, 0, OCQ_LENGTH * 32);
543 memset((char *)fi->q.ptr_imq_base, 0, IMQ_LENGTH * 32);
544 memset((char *)fi->q.ptr_edb_base, 0, EDB_LEN * 8);
545 memset((char *)fi->q.ptr_sest_base, 0, SEST_LENGTH * 32);
546 memset((char *)fi->q.ptr_sdb_base, 0, NO_OF_SDB_ENTRIES * SDB_SIZE);
547 memset((char *)fi->q.ptr_tachyon_header_base, 0xFF, NO_OF_TACH_HEADERS * TACH_HEADER_SIZE);
548 for (i = 0; i < SEST_LENGTH; i++)
549 fi->q.free_scsi_oxid[i] = OXID_AVAILABLE;
550 for (i = 0; i < NO_OF_SDB_ENTRIES; i++)
551 fi->q.sdb_slot_status[i] = SDB_FREE;
553 take_tachyon_offline(fi);
554 writel(readl(fi->t_r.ptr_tach_config_reg) | SCSI_ENABLE | WRITE_STREAM_SIZE | READ_STREAM_SIZE | PARITY_EVEN | OOO_REASSEMBLY_DISABLE, fi->t_r.ptr_tach_config_reg);
556 /* Write OCQ registers */
557 fi->q.ocq_prod_indx = 0;
558 *(fi->q.host_ocq_cons_indx) = 0;
560 /* The Tachyon needs to be passed the "real" address */
561 bus_addr = virt_to_bus(fi->q.ptr_ocq_base);
562 writel(bus_addr, fi->t_r.ptr_ocq_base_reg);
563 writel(OCQ_LENGTH - 1, fi->t_r. ptr_ocq_len_reg);
564 bus_indx_addr = virt_to_bus(fi->q.host_ocq_cons_indx);
565 writel(bus_indx_addr, fi->t_r.ptr_ocq_cons_indx_reg);
567 /* Write IMQ registers */
568 fi->q.imq_cons_indx = 0;
569 *(fi->q.host_imq_prod_indx) = 0;
570 bus_addr = virt_to_bus(fi->q.ptr_imq_base);
571 writel(bus_addr, fi->t_r.ptr_imq_base_reg);
572 writel(IMQ_LENGTH - 1, fi->t_r.ptr_imq_len_reg);
573 bus_indx_addr = virt_to_bus(fi->q.host_imq_prod_indx);
574 writel(bus_indx_addr, fi->t_r.ptr_imq_prod_indx_reg);
576 /* Write MFSBQ registers */
577 fi->q.mfsbq_prod_indx = MFSBQ_LENGTH - 1;
578 fi->q.mfsbuff_end = MFS_BUFFER_SIZE - 1;
579 fi->q.mfsbq_cons_indx = 0;
580 bus_addr = virt_to_bus(fi->q.ptr_mfsbq_base);
581 writel(bus_addr, fi->t_r.ptr_mfsbq_base_reg);
582 writel(MFSBQ_LENGTH - 1, fi->t_r.ptr_mfsbq_len_reg);
583 writel(fi->q.mfsbuff_end, fi->t_r.ptr_mfsbuff_len_reg);
584 /* Do this last as tachyon will prefetch the
585 * first entry as soon as we write to it. */
587 writel(fi->q.mfsbq_prod_indx, fi->t_r.ptr_mfsbq_prod_reg);
589 /* Write SFSBQ registers */
590 fi->q.sfsbq_prod_indx = SFSBQ_LENGTH - 1;
591 fi->q.sfsbuff_end = SFS_BUFFER_SIZE - 1;
592 fi->q.sfsbq_cons_indx = 0;
593 bus_addr = virt_to_bus(fi->q.ptr_sfsbq_base);
594 writel(bus_addr, fi->t_r.ptr_sfsbq_base_reg);
595 writel(SFSBQ_LENGTH - 1, fi->t_r.ptr_sfsbq_len_reg);
596 writel(fi->q.sfsbuff_end, fi->t_r.ptr_sfsbuff_len_reg);
597 /* Do this last as tachyon will prefetch the first
598 * entry as soon as we write to it. */
600 writel(fi->q.sfsbq_prod_indx, fi->t_r.ptr_sfsbq_prod_reg);
602 /* Write SEST registers */
603 bus_addr = virt_to_bus(fi->q.ptr_sest_base);
604 writel(bus_addr, fi->t_r.ptr_sest_base_reg);
605 writel(SEST_LENGTH - 1, fi->t_r.ptr_sest_len_reg);
606 /* the last 2 bits _should_ be 1 */
607 writel(SEST_BUFFER_SIZE - 1, fi->t_r.ptr_scsibuff_len_reg);
609 /* write AL_TIME & E_D_TOV into the registers */
610 writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
611 /* Tell Tachyon to pick a Soft Assigned AL_PA */
612 writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);
614 /* Read the WWN from EEPROM. But for now we assign it here. */
615 writel(WORLD_WIDE_NAME_LOW, fi->t_r.ptr_fm_wwn_low_reg);
616 writel(WORLD_WIDE_NAME_HIGH, fi->t_r.ptr_fm_wwn_hi_reg);
618 DPRINTK1("TACHYON initializing as L_Port...\n");
619 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
621 LEAVE("write_to_tachyon_registers");
625 static void tachyon_interrupt(int irq, void* dev_id, struct pt_regs* regs)
627 struct Scsi_Host *host = dev_id;
628 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
629 struct fc_info *fi = hostdata->fi;
630 u_long flags;
631 spin_lock_irqsave(&fi->fc_lock, flags);
632 tachyon_interrupt_handler(irq, dev_id, regs);
633 spin_unlock_irqrestore(&fi->fc_lock, flags);
636 static void tachyon_interrupt_handler(int irq, void* dev_id, struct pt_regs* regs)
638 struct Scsi_Host *host = dev_id;
639 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
640 struct fc_info *fi = hostdata->fi;
641 u_int *ptr_imq_entry;
642 u_int imq_int_type, current_IMQ_index = 0, prev_IMQ_index;
643 int index, no_of_entries = 0;
645 DPRINTK("\n");
646 ENTER("tachyon_interrupt");
647 if (fi->q.host_imq_prod_indx != NULL) {
648 current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx));
650 else {
651 /* _Should not_ happen */
652 T_MSG("IMQ_indx NULL. DISABLING INTERRUPTS!!!\n");
653 writel(0x0, fi->i_r.ptr_ichip_hw_control_reg);
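/* The number of new completion messages is the distance from our
 * consumer index to Tachyon's producer index, computed modulo
 * IMQ_LENGTH; it is zero when the two indices are equal. */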
656 if (current_IMQ_index > fi->q.imq_cons_indx)
657 no_of_entries = current_IMQ_index - fi->q.imq_cons_indx;
658 else
659 if (current_IMQ_index < fi->q.imq_cons_indx)
660 no_of_entries = IMQ_LENGTH - (fi->q.imq_cons_indx - current_IMQ_index);
662 if (no_of_entries == 0) {
663 u_int ichip_status;
664 ichip_status = readl(fi->i_r.ptr_ichip_hw_status_reg);
665 if (ichip_status & 0x20) {
666 /* Should _never_ happen. Might require a hard reset */
667 T_MSG("Too bad... PCI Bus Error. Resetting (i)chip");
668 reset_ichip(fi);
669 T_MSG("DISABLING INTERRUPTS!!!\n");
670 writel(0x0, fi->i_r.ptr_ichip_hw_control_reg);
674 prev_IMQ_index = current_IMQ_index;
675 for (index = 0; index < no_of_entries; index++) {
676 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
677 imq_int_type = ntohl(*ptr_imq_entry);
679 completion_message_handler(fi, imq_int_type);
680 if ((fi->g.link_up == FALSE) && ((imq_int_type == MFS_BUF_WARN) || (imq_int_type == SFS_BUF_WARN) || (imq_int_type == IMQ_BUF_WARN)))
681 break;
682 update_IMQ_indx(fi, 1);
684 /* Check for more entries */
685 current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx));
686 if (current_IMQ_index != prev_IMQ_index) {
687 no_of_entries++;
688 prev_IMQ_index = current_IMQ_index;
690 } /*end of for loop*/
691 return;
692 LEAVE("tachyon_interrupt");
696 static void handle_SFS_BUF_WARN_interrupt(struct fc_info *fi)
698 int i;
699 ENTER("handle_SFS_BUF_WARN_interrupt");
700 if (fi->g.link_up == FALSE) {
701 reset_tachyon(fi, SOFTWARE_RESET);
702 return;
704 /* Free up all but one entry in the Q. */
706 for (i = 0; i < ((SFSBQ_LENGTH - 1) * NO_OF_ENTRIES); i++) {
707 handle_SFS_interrupt(fi);
708 update_IMQ_indx(fi, 1);
710 LEAVE("handle_SFS_BUF_WARN_interrupt");
713 /* Untested_Code_Begin */
714 static void handle_MFS_BUF_WARN_interrupt(struct fc_info *fi)
716 int i;
717 ENTER("handle_MFS_BUF_WARN_interrupt");
718 if (fi->g.link_up == FALSE) {
719 reset_tachyon(fi, SOFTWARE_RESET);
720 return;
722 /* FIXME: freeing up 8 entries. */
724 for (i = 0; i < NO_OF_ENTRIES; i++) {
725 handle_MFS_interrupt(fi);
726 update_IMQ_indx(fi, 1);
728 LEAVE("handle_MFS_BUF_WARN_interrupt");
730 /*Untested_Code_End */
732 static void handle_IMQ_BUF_WARN_interrupt(struct fc_info *fi)
734 u_int *ptr_imq_entry;
735 u_int imq_int_type, current_IMQ_index = 0, temp_imq_cons_indx;
736 int index, no_of_entries = 0;
738 ENTER("handle_IMQ_BUF_WARN_interrupt");
739 if (fi->g.link_up == FALSE) {
740 reset_tachyon(fi, SOFTWARE_RESET);
741 return;
743 current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx));
745 if (current_IMQ_index > fi->q.imq_cons_indx)
746 no_of_entries = current_IMQ_index - fi->q.imq_cons_indx;
747 else
748 if (current_IMQ_index < fi->q.imq_cons_indx)
749 no_of_entries = IMQ_LENGTH - (fi->q.imq_cons_indx - current_IMQ_index);
750 /* We don't want to look at the same IMQ entry again. */
752 temp_imq_cons_indx = fi->q.imq_cons_indx + 1;
753 if (no_of_entries != 0)
754 no_of_entries -= 1;
755 for (index = 0; index < no_of_entries; index++) {
756 ptr_imq_entry = fi->q.ptr_imqe[temp_imq_cons_indx];
757 imq_int_type = ntohl(*ptr_imq_entry);
758 if (imq_int_type != IMQ_BUF_WARN)
759 completion_message_handler(fi, imq_int_type);
760 temp_imq_cons_indx++;
761 if (temp_imq_cons_indx == IMQ_LENGTH)
762 temp_imq_cons_indx = 0;
763 } /*end of for loop*/
764 if (no_of_entries != 0)
765 update_IMQ_indx(fi, no_of_entries);
766 LEAVE("handle_IMQ_BUF_WARN_interrupt");
769 static void completion_message_handler(struct fc_info *fi, u_int imq_int_type)
771 switch(imq_int_type) {
772 case OUTBOUND_COMPLETION:
773 DPRINTK("OUTBOUND_COMPLETION message received");
774 break;
775 case OUTBOUND_COMPLETION_I:
776 DPRINTK("OUTBOUND_COMPLETION_I message received");
777 handle_OCI_interrupt(fi);
778 break;
779 case OUT_HI_PRI_COMPLETION:
780 DPRINTK("OUT_HI_PRI_COMPLETION message received");
781 break;
782 case OUT_HI_PRI_COMPLETION_I:
783 DPRINTK("OUT_HI_PRI_COMPLETION_I message received");
784 break;
785 case INBOUND_MFS_COMPLETION:
786 DPRINTK("INBOUND_MFS_COMPLETION message received");
787 handle_MFS_interrupt(fi);
788 break;
789 case INBOUND_OOO_COMPLETION:
790 DPRINTK("INBOUND_OOO_COMPLETION message received");
791 handle_OOO_interrupt(fi);
792 break;
793 case INBOUND_SFS_COMPLETION:
794 DPRINTK("INBOUND_SFS_COMPLETION message received");
795 handle_SFS_interrupt(fi);
796 break;
797 case INBOUND_UNKNOWN_FRAME_I:
798 DPRINTK("INBOUND_UNKNOWN_FRAME message received");
799 handle_Unknown_Frame_interrupt(fi);
800 break;
801 case INBOUND_BUSIED_FRAME:
802 DPRINTK("INBOUND_BUSIED_FRAME message received");
803 handle_Busied_Frame_interrupt(fi);
804 break;
805 case FRAME_MGR_INTERRUPT:
806 DPRINTK("FRAME_MGR_INTERRUPT message received");
807 handle_FM_interrupt(fi);
808 break;
809 case READ_STATUS:
810 DPRINTK("READ_STATUS message received");
811 break;
812 case SFS_BUF_WARN:
813 DPRINTK("SFS_BUF_WARN message received");
814 handle_SFS_BUF_WARN_interrupt(fi);
815 break;
816 case MFS_BUF_WARN:
817 DPRINTK("MFS_BUF_WARN message received");
818 handle_MFS_BUF_WARN_interrupt(fi);
819 break;
820 case IMQ_BUF_WARN:
821 DPRINTK("IMQ_BUF_WARN message received");
822 handle_IMQ_BUF_WARN_interrupt(fi);
823 break;
824 case INBOUND_C1_TIMEOUT:
825 DPRINTK("INBOUND_C1_TIMEOUT message received");
826 break;
827 case BAD_SCSI_FRAME:
828 DPRINTK("BAD_SCSI_FRAME message received");
829 handle_Bad_SCSI_Frame_interrupt(fi);
830 break;
831 case INB_SCSI_STATUS_COMPLETION:
832 DPRINTK("INB_SCSI_STATUS_COMPL message received");
833 handle_Inbound_SCSI_Status_interrupt(fi);
834 break;
835 case INBOUND_SCSI_COMMAND:
836 DPRINTK("INBOUND_SCSI_COMMAND message received");
837 handle_Inbound_SCSI_Command_interrupt(fi);
838 break;
839 case INBOUND_SCSI_DATA_COMPLETION:
840 DPRINTK("INBOUND_SCSI_DATA message received");
841 /* Only for targets */
842 break;
843 default:
844 T_MSG("DEFAULT message received, type = %x", imq_int_type);
845 return;
847 reset_latch(fi);
850 static void handle_OCI_interrupt(struct fc_info *fi)
852 u_int *ptr_imq_entry;
853 u_long transaction_id = 0;
854 unsigned short status, seq_count, transmitted_ox_id;
855 struct Scsi_Host *host = fi->host;
856 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
857 Scsi_Cmnd *Cmnd;
858 u_int tag;
860 ENTER("handle_OCI_interrupt");
861 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
862 transaction_id = ntohl(*(ptr_imq_entry + 1));
863 status = ntohl(*(ptr_imq_entry + 2)) >> 16;
864 seq_count = ntohl(*(ptr_imq_entry + 3));
865 DPRINTK("transaction_id= %x", (u_int)transaction_id);
866 tag = transaction_id & 0xFFFF0000;
867 transmitted_ox_id = transaction_id;
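/* transaction_id packs a frame-class tag in its upper 16 bits and the
 * transmitted OX_ID in its lower 16 bits; for IP frames it instead
 * carries the bus address of the skb, which is freed in the time-out
 * path below. */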
869 /* The INT could be due either to a TIME_OUT or to a BAD_ALPA,
870 * but we check only for TimeOuts. A Bad AL_PA will be
871 * caught by the FM_interrupt handler. */
874 if ((status == OCM_TIMEOUT_OR_BAD_ALPA) && (!fi->g.port_discovery) && (!fi->g.perform_adisc)){
875 DPRINTK("Frame TimeOut on OX_ID = %x", (u_int)transaction_id);
877 /* Is it a SCSI frame that is timing out? Not a very good check... */
879 if ((transmitted_ox_id <= MAX_SCSI_OXID) && ((tag == FC_SCSI_BAD_TARGET) || (tag < 0x00FF0000))) {
880 /* If it is a Bad AL_PA, we report it as BAD_TARGET.
881 * Else, we allow the command to time-out. A Link
882 * re-initialization could be taking place. */
884 if (tag == FC_SCSI_BAD_TARGET) {
885 Cmnd = hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID];
886 hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID] = NULL;
887 if (Cmnd != NULL) {
888 Cmnd->result = DID_BAD_TARGET << 16;
889 (*Cmnd->scsi_done) (Cmnd);
891 else
892 T_MSG("NULL Command out of handler!");
893 } /* if Bad Target */
894 else {
895 u_char missing_target = tag >> 16;
896 struct fc_node_info *q = fi->node_info_list;
897 /* A Node that we thought was logged in has gone
898 * away. We are the optimistic kind and we keep
899 * hoping that our dear little Target will come back
900 * to us. For now we log him out. */
902 DPRINTK2("Missing Target = %d", missing_target);
903 while (q != NULL) {
904 if (q->target_id == missing_target) {
905 T_MSG("Target %d Logged out", q->target_id);
906 q->login = LOGIN_ATTEMPTED;
907 if (fi->num_nodes > 0)
908 fi->num_nodes--;
909 tx_logi(fi, ELS_PLOGI, q->d_id);
910 break;
912 else
913 q = q->next;
916 } /* End of SCSI frame timing out. */
917 else {
918 if (seq_count > 1) {
919 /* An IP frame was transmitted to a Bad AL_PA. Free up
920 * the skb used. */
922 dev_kfree_skb((struct sk_buff *)(bus_to_virt(transaction_id)));
924 } /* End of IP frame timing out. */
925 } /* End of frame timing out. */
926 else {
927 /* Frame was transmitted successfully. Check if it was an ELS
928 * frame or an IP frame or a Bad_Target_Notification frame (in
929 * case of a ptp_link). Ugly! */
931 if ((status == 0) && (seq_count == 0)) {
932 u_int tag = transaction_id & 0xFFFF0000;
933 /* Continue with port discovery after an ELS is successfully
934 * transmitted (status == 0). */
936 DPRINTK("tag = %x", tag);
937 switch(tag) {
938 case ELS_FLOGI:
939 /* Let's use the Name Server instead */
940 fi->g.explore_fabric = TRUE;
941 fi->g.port_discovery = FALSE;
942 fi->g.alpa_list_index = MAX_NODES;
943 add_to_ox_id_list(fi, transaction_id, tag);
944 break;
945 case ELS_PLOGI:
946 if (fi->g.fabric_present && (fi->g.name_server == FALSE))
947 add_to_ox_id_list(fi,transaction_id,ELS_NS_PLOGI);
948 else
949 add_to_ox_id_list(fi, transaction_id, tag);
950 break;
951 case FC_SCSI_BAD_TARGET:
952 Cmnd = hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID];
953 hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID] = NULL;
954 if (Cmnd != NULL) {
955 Cmnd->result = DID_BAD_TARGET << 16;
956 (*Cmnd->scsi_done) (Cmnd);
958 else
959 T_MSG("NULL Command out of handler!");
960 break;
961 default:
962 add_to_ox_id_list(fi, transaction_id, tag);
965 if (fi->g.alpa_list_index >= MAX_NODES) {
966 if (fi->g.port_discovery == TRUE) {
967 fi->g.port_discovery = FALSE;
968 add_display_cache_timer(fi);
970 fi->g.alpa_list_index = MAX_NODES;
972 if (fi->g.port_discovery == TRUE)
973 local_port_discovery(fi);
975 else {
976 /* An IP frame has been successfully transmitted.
977 * Free the skb that was used for this IP frame. */
979 if ((status == 0) && (seq_count > 1)) {
980 dev_kfree_skb((struct sk_buff *)(bus_to_virt(transaction_id)));
984 LEAVE("handle_OCI_interrupt");
987 /* Right now we discard OOO frames */
988 static void handle_OOO_interrupt(struct fc_info *fi)
990 u_int *ptr_imq_entry;
991 int queue_indx, offset, payload_size;
992 int no_of_buffers = 1; /* header is in a separate buffer */
993 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
994 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
995 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
996 queue_indx = queue_indx >> 16;
997 payload_size = ntohl(*(ptr_imq_entry + 2)) - TACHYON_HEADER_LEN;
998 /* Calculate total number of buffers */
999 no_of_buffers += payload_size / MFS_BUFFER_SIZE;
1000 if (payload_size % MFS_BUFFER_SIZE)
1001 no_of_buffers++;
1003 /* provide Tachyon with another set of buffers */
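/* Consumed buffers are handed back to Tachyon only in whole MFSBQ
 * entries (NO_OF_ENTRIES buffers at a time); any remainder is carried
 * over in mfs_buffer_count until enough have accumulated. */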
1004 fi->g.mfs_buffer_count += no_of_buffers;
1005 if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
1006 int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
1007 fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
1008 update_MFSBQ_indx(fi, count);
1012 static void handle_MFS_interrupt(struct fc_info *fi)
1014 u_int *ptr_imq_entry, *buff_addr;
1015 u_int type_of_frame, s_id;
1016 int queue_indx, offset, payload_size, starting_indx, starting_offset;
1017 u_short received_ox_id;
1018 int no_of_buffers = 1; /* header is in a separate buffer */
1019 struct sk_buff *skb;
1020 int wrap_around = FALSE, no_of_wrap_buffs = NO_OF_ENTRIES - 1;
1021 ENTER("handle_MFS_interrupt");
1022 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1023 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1024 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1025 queue_indx = queue_indx >> 16;
1026 DPRINTK("queue_indx = %d, offset = %d\n", queue_indx, offset);
1027 payload_size = ntohl(*(ptr_imq_entry + 2)) - TACHYON_HEADER_LEN;
1028 DPRINTK("payload_size = %d", payload_size);
1029 /* Calculate total number of buffers */
1030 no_of_buffers += payload_size / MFS_BUFFER_SIZE;
1031 if (payload_size % MFS_BUFFER_SIZE)
1032 no_of_buffers++;
1033 DPRINTK("no_of_buffers = %d", no_of_buffers);
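/* Work backwards from the buffer holding the Tachyon header to find
 * the MFSBQ entry and offset of the first payload buffer; the sequence
 * may have wrapped around the end of the queue. */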
1035 if ((no_of_buffers - 1) <= offset) {
1036 starting_offset = offset - (no_of_buffers - 1);
1037 starting_indx = queue_indx;
1039 else {
1040 int temp = no_of_buffers - (offset + 1);
1041 int no_of_queues = temp / NO_OF_ENTRIES;
1042 starting_offset = temp % NO_OF_ENTRIES;
1043 if (starting_offset != 0) {
1044 no_of_wrap_buffs = starting_offset - 1; //exclude header
1045 starting_offset = NO_OF_ENTRIES - starting_offset;
1046 no_of_queues++;
1048 starting_indx = queue_indx - no_of_queues;
1049 if (starting_indx < 0) {
1050 no_of_wrap_buffs -= (starting_indx + 1) * NO_OF_ENTRIES;
1051 starting_indx = MFSBQ_LENGTH + starting_indx;
1052 wrap_around = TRUE;
1056 DPRINTK("starting_indx = %d, starting offset = %d no_of_wrap_buffs = %d\n", starting_indx, starting_offset, no_of_wrap_buffs);
1057 /* Get Tachyon Header from first buffer */
1058 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base + starting_indx*NO_OF_ENTRIES + starting_offset)));
1061 /* extract Type of Frame */
1062 type_of_frame = (u_int)ntohl(*(buff_addr + 4)) & 0xFF000000;
1063 s_id = (u_int)ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1064 received_ox_id = ntohl(*(buff_addr + 6)) >> 16;
1065 buff_addr += MFS_BUFFER_SIZE/4;
1066 DPRINTK("type_of_frame = %x, s_id = %x, ox_id = %x", type_of_frame, s_id, received_ox_id);
1068 switch(type_of_frame) {
1069 case TYPE_LLC_SNAP:
1070 skb = dev_alloc_skb(payload_size);
1071 if (skb == NULL) {
1072 printk(KERN_NOTICE "%s: In handle_MFS_interrupt() Memory squeeze, dropping packet.\n", fi->name);
1073 fi->fc_stats.rx_dropped++;
1074 fi->g.mfs_buffer_count += no_of_buffers;
1075 if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
1076 int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
1077 fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
1078 update_MFSBQ_indx(fi, count);
1079 return;
1082 if (wrap_around) {
1083 int wrap_size = no_of_wrap_buffs * MFS_BUFFER_SIZE;
1084 int tail_size = payload_size - wrap_size;
1085 DPRINTK("wrap_size = %d, tail_size = %d\n", wrap_size, tail_size);
1086 if (no_of_wrap_buffs)
1087 memcpy(skb_put(skb, wrap_size), buff_addr, wrap_size);
1088 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base)));
1089 memcpy(skb_put(skb, tail_size), buff_addr, tail_size);
1091 else
1092 memcpy(skb_put(skb, payload_size), buff_addr, payload_size);
1093 rx_net_mfs_packet(fi, skb);
1094 break;
1095 default:
1096 T_MSG("Unknown Frame Type received. Type = %x", type_of_frame);
1099 /* provide Tachyon with another set of buffers */
1100 fi->g.mfs_buffer_count += no_of_buffers;
1101 if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
1102 int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
1103 fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
1104 update_MFSBQ_indx(fi, count);
1106 LEAVE("handle_MFS_interrupt");
1109 static void handle_Unknown_Frame_interrupt(struct fc_info *fi)
1111 u_int *ptr_imq_entry;
1112 int queue_indx, offset;
1113 ENTER("handle_Unknown_Frame_interrupt");
1114 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1115 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1116 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1117 queue_indx = queue_indx >> 16;
1118 /* We discard the "unknown" frame */
1119 /* provide Tachyon with another set of buffers */
1120 if (offset == (NO_OF_ENTRIES - 1))
1121 update_SFSBQ_indx(fi);
1122 LEAVE("handle_Unknown_Frame_interrupt");
1125 static void handle_Busied_Frame_interrupt(struct fc_info *fi)
1127 u_int *ptr_imq_entry;
1128 int queue_indx, offset;
1129 ENTER("handle_Busied_Frame_interrupt");
1130 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1131 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1132 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1133 queue_indx = queue_indx >> 16;
1134 /* We discard the "busied" frame */
1136 /* provide Tachyon with another set of buffers */
1136 if (offset == (NO_OF_ENTRIES - 1))
1137 update_SFSBQ_indx(fi);
1138 LEAVE("handle_Busied_Frame_interrupt");
1141 static void handle_Bad_SCSI_Frame_interrupt(struct fc_info *fi)
1143 u_int *ptr_imq_entry, *buff_addr, *tach_header, *ptr_edb;
1144 u_int s_id, rctl, frame_class, burst_len, transfered_len, len = 0;
1145 int queue_indx, offset, payload_size, i;
1146 u_short ox_id, rx_id, x_id, mtu = 512;
1147 u_char target_id = 0xFF;
1149 ENTER("handle_Bad_SCSI_Frame_interrupt");
1150 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1151 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1152 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1153 queue_indx = queue_indx >> 16;
1154 payload_size = ntohl(*(ptr_imq_entry + 2));
1156 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1158 rctl = ntohl(*(buff_addr + 2)) & 0xFF000000;
1159 s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1160 ox_id = ntohl(*(buff_addr + 6)) >> 16;
1161 rx_id = ntohl(*(buff_addr + 6));
1162 x_id = ox_id & MAX_SCSI_XID;
1164 /* Any frame that comes in with OX_ID that matches an OX_ID
1165 * that has been allocated for SCSI, will be called a Bad
1166 * SCSI frame if the Exchange is not valid any more.
1168 * We will also get a Bad SCSI frame interrupt if we receive
1169 * an XFER_RDY with offset != 0. Tachyon washes its hands of
1170 * this Exchange; we have to take care of it ourselves. Grrr... */
1172 if (rctl == DATA_DESCRIPTOR) {
1173 struct fc_node_info *q = fi->node_info_list;
1174 while (q != NULL) {
1175 if (q->d_id == s_id) {
1176 target_id = q->target_id;
1177 mtu = q->mtu;
1178 break;
1180 else
1181 q = q->next;
1183 frame_class = target_id;
1184 transfered_len = ntohl(*(buff_addr + 8));
1185 burst_len = ntohl(*(buff_addr + 9));
1187 build_ODB(fi, fi->g.seq_id, s_id, burst_len, 0, mtu, ox_id, rx_id, 0, 0, frame_class << 16);
1188 /* Update the SEQ_ID and Relative Offset in the
1189 * Tachyon Header Structure. */
1191 tach_header = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 5)));
1192 *(tach_header + 5) = htonl(fi->g.seq_id << 24);
1193 *(tach_header + 7) = htonl(transfered_len);
1194 fi->g.odb.hdr_addr = *(fi->q.ptr_sest[x_id] + 5);
1196 /* Invalidate the EDBs used */
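/* Walk the EDB chain for this exchange, freeing entries and summing
 * their lengths until the bytes already transferred are accounted
 * for; fi->g.odb.edb_addr is then pointed at the first EDB that
 * still has data to send. */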
1198 ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
1200 for (i = 0; i < EDB_LEN; i++)
1201 if (fi->q.ptr_edb[i] == ptr_edb)
1202 break;
1203 ptr_edb--;
1205 if (i < EDB_LEN) {
1206 int j;
1207 do {
1208 ptr_edb += 2;
1209 len += (htonl(*ptr_edb) & 0xFFFF);
1210 j = i;
1211 fi->q.free_edb_list[i++] = EDB_FREE;
1212 if (i == EDB_LEN) {
1213 i = 0;
1214 ptr_edb = fi->q.ptr_edb_base - 1;
1216 } while (len < transfered_len);
1217 if (len > transfered_len) {
1218 ptr_edb--;
1219 fi->q.free_edb_list[j] = EDB_BUSY;
1221 else
1222 ptr_edb++;
1224 else {
1225 T_MSG("EDB not found while freeing");
1226 if (offset == (NO_OF_ENTRIES - 1))
1227 update_SFSBQ_indx(fi);
1228 return;
1231 /* Update the EDB pointer in the ODB. */
1233 fi->g.odb.edb_addr = htonl(virt_to_bus(ptr_edb));
1234 memcpy(fi->q.ptr_odb[fi->q.ocq_prod_indx], &(fi->g.odb), sizeof(ODB));
1235 /* Update the EDB pointer in the SEST entry. We might need
1236 * this if we get another XFER_RDY for the same Exchange. */
1238 *(fi->q.ptr_sest[x_id] + 7) = htonl(virt_to_bus(ptr_edb));
1240 update_OCQ_indx(fi);
1241 if (fi->g.seq_id == MAX_SEQ_ID)
1242 fi->g.seq_id = 0;
1243 else
1244 fi->g.seq_id++;
1246 else
1247 /* Could be a BA_ACC or a BA_RJT. */
1249 if (rctl == RCTL_BASIC_ACC) {
1250 u_int bls_type = remove_from_ox_id_list(fi, ox_id);
1251 DPRINTK1("BA_ACC received from S_ID 0x%x with OX_ID = %x in response to %x", s_id, ox_id, bls_type);
1252 if (bls_type == RCTL_BASIC_ABTS) {
1253 u_int STE_bit;
1254 /* Invalidate resources for that Exchange. */
1256 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
1257 if (STE_bit & SEST_V) {
1258 *(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
1259 invalidate_SEST_entry(fi, ox_id);
1263 else
1264 if (rctl == RCTL_BASIC_RJT) {
1265 u_int bls_type = remove_from_ox_id_list(fi, ox_id);
1266 DPRINTK1("BA_RJT received from S_ID 0x%x with OX_ID = %x in response to %x", s_id, ox_id, bls_type);
1267 if (bls_type == RCTL_BASIC_ABTS) {
1268 u_int STE_bit;
1269 /* Invalidate resources for that Exchange. */
1271 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
1272 if (STE_bit & SEST_V) {
1273 *(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
1274 invalidate_SEST_entry(fi, ox_id);
1278 else
1279 DPRINTK1("Frame with R_CTL = %x received from S_ID 0x%x with OX_ID %x", rctl, s_id, ox_id);
1281 /* Else, discard the "Bad" SCSI frame. */
1284 /* provide Tachyon with another set of buffers */
1286 if (offset == (NO_OF_ENTRIES - 1))
1287 update_SFSBQ_indx(fi);
1288 LEAVE("handle_Bad_SCSI_Frame_interrupt");
1291 static void handle_Inbound_SCSI_Status_interrupt(struct fc_info *fi)
1293 struct Scsi_Host *host = fi->host;
1294 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
1295 u_int *ptr_imq_entry, *buff_addr, *ptr_rsp_info, *ptr_sense_info = NULL;
1296 int queue_indx, offset, payload_size;
1297 u_short received_ox_id, x_id;
1298 Scsi_Cmnd *Cmnd;
1299 u_int fcp_status, fcp_rsp_info_len = 0, fcp_sense_info_len = 0, s_id;
1300 ENTER("handle_SCSI_status_interrupt");
1302 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1303 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1304 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1305 queue_indx = queue_indx >> 16;
1306 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1307 payload_size = ntohl(*(ptr_imq_entry + 2));
1308 received_ox_id = ntohl(*(buff_addr + 6)) >> 16;
1310 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1312 fcp_status = ntohl(*(buff_addr + 10));
1313 ptr_rsp_info = buff_addr + 14;
1314 if (fcp_status & FCP_STATUS_RSP_LEN)
1315 fcp_rsp_info_len = ntohl(*(buff_addr + 13));
1317 if (fcp_status & FCP_STATUS_SENSE_LEN) {
1318 ptr_sense_info = ptr_rsp_info + fcp_rsp_info_len / 4;
1319 fcp_sense_info_len = ntohl(*(buff_addr + 12));
1320 DPRINTK("sense_info = %x", (u_int)ntohl(*ptr_sense_info));
1322 DPRINTK("fcp_status = %x, fcp_rsp_len = %x", fcp_status, fcp_rsp_info_len);
1323 x_id = received_ox_id & MAX_SCSI_XID;
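/* The low bits of the OX_ID index the per-host table of outstanding
 * Scsi_Cmnd pointers; the slot is cleared before scsi_done() is
 * called. */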
1324 Cmnd = hostdata->cmnd_handler[x_id];
1325 hostdata->cmnd_handler[x_id] = NULL;
1326 if (Cmnd != NULL) {
1327 memset(Cmnd->sense_buffer, 0, sizeof(Cmnd->sense_buffer));
1328 /* Check if there is a Sense field */
1329 if (fcp_status & FCP_STATUS_SENSE_LEN) {
1330 int size = sizeof(Cmnd->sense_buffer);
1331 if (fcp_sense_info_len < size)
1332 size = fcp_sense_info_len;
1333 memcpy(Cmnd->sense_buffer, (char *)ptr_sense_info, size);
1335 Cmnd->result = fcp_status & FCP_STATUS_MASK;
1336 (*Cmnd->scsi_done) (Cmnd);
1338 else
1339 T_MSG("NULL Command out of handler!");
1341 invalidate_SEST_entry(fi, received_ox_id);
1342 s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1343 fi->q.free_scsi_oxid[x_id] = OXID_AVAILABLE;
1345 /* provide Tachyon with another set of buffers */
1346 if (offset == (NO_OF_ENTRIES - 1))
1347 update_SFSBQ_indx(fi);
1348 LEAVE("handle_SCSI_status_interrupt");
1351 static void invalidate_SEST_entry(struct fc_info *fi, u_short received_ox_id)
1353 u_short x_id = received_ox_id & MAX_SCSI_XID;
1354 /* Invalidate SEST entry if it is an OutBound SEST Entry */
1356 if (!(received_ox_id & SCSI_READ_BIT)) {
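/* Outbound (SCSI write) exchanges own a Tachyon header and a chain
 * of EDBs that must be returned to their free pools; inbound (read)
 * exchanges only need their SDB slot released, in the else branch
 * below. */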
1357 u_int *ptr_tach_header, *ptr_edb;
1358 u_short temp_ox_id = NOT_SCSI_XID;
1359 int i;
1360 *(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
1362 /* Invalidate the Tachyon Header structure */
1364 ptr_tach_header = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 5)));
1365 for (i = 0; i < NO_OF_TACH_HEADERS; i++)
1366 if(fi->q.ptr_tachyon_header[i] == ptr_tach_header)
1367 break;
1368 if (i < NO_OF_TACH_HEADERS)
1369 memset(ptr_tach_header, 0xFF, 32);
1370 else
1371 T_MSG("Tachyon Header not found while freeing in invalidate_SEST_entry()");
1373 /* Invalidate the EDB used */
1375 ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
1376 for (i = 0; i < EDB_LEN; i++)
1377 if (fi->q.ptr_edb[i] == ptr_edb)
1378 break;
1379 ptr_edb--;
1380 if (i < EDB_LEN) {
1381 do {
1382 ptr_edb += 2;
1383 fi->q.free_edb_list[i++] = EDB_FREE;
1384 if (i == EDB_LEN) {
1385 i = 0;
1386 ptr_edb = fi->q.ptr_edb_base - 1;
1388 } while ((htonl(*ptr_edb) & 0x80000000) != 0x80000000);
1390 else
1391 T_MSG("EDB not found while freeing in invalidate_SEST_entry()");
1393 /* Search for its other header structure and destroy it! */
1395 if ((ptr_tach_header + 16) < (fi->q.ptr_tachyon_header_base + (MY_PAGE_SIZE/4)))
1396 ptr_tach_header += 16;
1397 else
1398 ptr_tach_header = fi->q.ptr_tachyon_header_base;
1399 while (temp_ox_id != x_id) {
1400 temp_ox_id = ntohl(*(ptr_tach_header + 6)) >> 16;
1401 if (temp_ox_id == x_id) {
1402 /* Paranoid checking... */
1404 for (i = 0; i < NO_OF_TACH_HEADERS; i++)
1405 if(fi->q.ptr_tachyon_header[i] == ptr_tach_header)
1406 break;
1407 if (i < NO_OF_TACH_HEADERS)
1408 memset(ptr_tach_header, 0xFF, 32);
1409 else
1410 T_MSG("Tachyon Header not found while freeing in invalidate_SEST_entry()");
1411 break;
1413 else {
1414 if ((ptr_tach_header + 16) < (fi->q.ptr_tachyon_header_base + (MY_PAGE_SIZE/4)))
1415 ptr_tach_header += 16;
1416 else
1417 ptr_tach_header = fi->q.ptr_tachyon_header_base;
1421 else {
1422 u_short sdb_table_indx;
1423 /* An Inbound Command has completed or needs to be Aborted.
1424 * Clear up the SDB buffers. */
1426 sdb_table_indx = *(fi->q.ptr_sest[x_id] + 5);
1427 fi->q.sdb_slot_status[sdb_table_indx] = SDB_FREE;
1431 static void handle_Inbound_SCSI_Command_interrupt(struct fc_info *fi)
1433 u_int *ptr_imq_entry;
1434 int queue_indx, offset;
1435 ENTER("handle_Inbound_SCSI_Command_interrupt");
1436 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1437 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1438 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1439 queue_indx = queue_indx >> 16;
1440 /* We discard the SCSI frame as we shouldn't be receiving
1441 * a SCSI Command in the first place. */
1443 /* provide Tachyon with another set of buffers */
1444 if (offset == (NO_OF_ENTRIES - 1))
1445 update_SFSBQ_indx(fi);
1446 LEAVE("handle_Inbound_SCSI_Command_interrupt");
1449 static void handle_SFS_interrupt(struct fc_info *fi)
1451 u_int *ptr_imq_entry, *buff_addr;
1452 u_int class_of_frame, type_of_frame, s_id, els_type = 0, rctl;
1453 int queue_indx, offset, payload_size, login_state;
1454 u_short received_ox_id, fs_cmnd_code;
1455 ENTER("handle_SFS_interrupt");
1456 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1457 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1458 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1459 queue_indx = queue_indx >> 16;
1460 DPRINTK("queue_indx = %d, offset = %d\n", queue_indx, offset);
1461 payload_size = ntohl(*(ptr_imq_entry + 2));
1462 DPRINTK("payload_size = %d", payload_size);
1464 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1466 /* extract Type of Frame */
1467 type_of_frame = ntohl(*(buff_addr + 4)) & 0xFF000000;
1468 s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1469 received_ox_id = ntohl(*(buff_addr + 6)) >> 16;
1470 switch(type_of_frame) {
1471 case TYPE_BLS:
1472 rctl = ntohl(*(buff_addr + 2)) & 0xFF000000;
1473 switch(rctl) {
1474 case RCTL_BASIC_ABTS:
1475 /* As an Initiator, we should never be receiving
1476 * this. */
1478 DPRINTK1("ABTS received from S_ID 0x%x with OX_ID = %x", s_id, received_ox_id);
1479 break;
1481 break;
1482 case TYPE_ELS:
1483 class_of_frame = ntohl(*(buff_addr + 8));
1484 login_state = sid_logged_in(fi, s_id);
1485 switch(class_of_frame & 0xFF000000) {
1486 case ELS_PLOGI:
1487 if (s_id != fi->g.my_id) {
1488 u_int ret_code;
1489 DPRINTK1("PLOGI received from D_ID 0x%x with 0X_ID = %x", s_id, received_ox_id);
1490 if ((ret_code = plogi_ok(fi, buff_addr, payload_size)) == 0){
1491 tx_logi_acc(fi, ELS_ACC, s_id, received_ox_id);
1492 add_to_address_cache(fi, buff_addr);
1494 else {
1495 u_short cmnd_code = ret_code >> 16;
1496 u_short expln_code = ret_code;
1497 tx_ls_rjt(fi, s_id, received_ox_id, cmnd_code, expln_code);
1500 break;
1501 case ELS_ACC:
1502 els_type = remove_from_ox_id_list(fi, received_ox_id);
1503 DPRINTK1("ELS_ACC received from D_ID 0x%x in response to ELS %x", s_id, els_type);
1504 switch(els_type) {
1505 case ELS_PLOGI:
1506 add_to_address_cache(fi, buff_addr);
1507 tx_prli(fi, ELS_PRLI, s_id, OX_ID_FIRST_SEQUENCE);
1508 break;
1509 case ELS_FLOGI:
1510 add_to_address_cache(fi, buff_addr);
1511 fi->g.my_id = ntohl(*(buff_addr + 2)) & 0x00FFFFFF;
1512 fi->g.fabric_present = TRUE;
1513 fi->g.my_ddaa = fi->g.my_id & 0xFFFF00;
1514 /* Login to the Name Server */
1516 tx_logi(fi, ELS_PLOGI, DIRECTORY_SERVER);
1517 break;
1518 case ELS_NS_PLOGI:
1519 fi->g.name_server = TRUE;
1520 add_to_address_cache(fi, buff_addr);
1521 tx_name_server_req(fi, FCS_RFC_4);
1522 tx_scr(fi);
1523 /* Some devices have a delay before
1524 * registering with the Name Server. */
1526 udelay(500);
1527 tx_name_server_req(fi, FCS_GP_ID4);
1528 break;
1529 case ELS_PRLI:
1530 mark_scsi_sid(fi, buff_addr, ADD_ENTRY);
1531 break;
1532 case ELS_ADISC:
1533 if (!(validate_login(fi, buff_addr)))
1534 tx_logo(fi, s_id, OX_ID_FIRST_SEQUENCE);
1535 break;
1537 break;
1538 case ELS_PDISC:
1539 DPRINTK1("ELS_PDISC received from D_ID 0x%x", s_id);
1540 tx_logo(fi, s_id, received_ox_id);
1541 break;
1542 case ELS_ADISC:
1543 DPRINTK1("ELS_ADISC received from D_ID 0x%x", s_id);
1544 if (node_logged_in_prev(fi, buff_addr))
1545 tx_adisc(fi, ELS_ACC, s_id, received_ox_id);
1546 else
1547 tx_logo(fi, s_id, received_ox_id);
1548 break;
1549 case ELS_PRLI:
1550 DPRINTK1("ELS_PRLI received from D_ID 0x%x", s_id);
1551 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN)) {
1552 tx_prli(fi, ELS_ACC, s_id, received_ox_id);
1553 mark_scsi_sid(fi, buff_addr, ADD_ENTRY);
1555 else
1556 tx_logo(fi, s_id, received_ox_id);
1557 break;
1558 case ELS_PRLO:
1559 DPRINTK1("ELS_PRLO received from D_ID 0x%x", s_id);
1560 if ((login_state == NODE_LOGGED_OUT) || (login_state == NODE_NOT_PRESENT))
1561 tx_logo(fi, s_id, received_ox_id);
1562 else
1563 if (login_state == NODE_LOGGED_IN)
1565 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1566 else
1567 if (login_state == NODE_PROCESS_LOGGED_IN) {
1568 tx_prli(fi, ELS_ACC, s_id, received_ox_id);
1569 mark_scsi_sid(fi, buff_addr, DELETE_ENTRY);
1571 break;
1572 case ELS_LS_RJT:
1573 els_type = remove_from_ox_id_list(fi, received_ox_id);
1574 DPRINTK1("ELS_LS_RJT received from D_ID 0x%x in response to %x", s_id, els_type);
1575 /* We should be checking the reason code. */
1577 switch (els_type) {
1578 case ELS_ADISC:
1579 tx_logi(fi, ELS_PLOGI, s_id);
1580 break;
1582 break;
1583 case ELS_LOGO:
1584 els_type = remove_from_ox_id_list(fi, received_ox_id);
1585 DPRINTK1("ELS_LOGO received from D_ID 0x%x in response to %x", s_id, els_type);
1586 remove_from_address_cache(fi, buff_addr, ELS_LOGO);
1587 tx_acc(fi, s_id, received_ox_id);
1588 if (els_type == ELS_ADISC)
1589 tx_logi(fi, ELS_PLOGI, s_id);
1590 break;
1591 case ELS_RSCN:
1592 DPRINTK1("ELS_RSCN received from D_ID 0x%x", s_id);
1593 tx_acc(fi, s_id, received_ox_id);
1594 remove_from_address_cache(fi, buff_addr, ELS_RSCN);
1595 break;
1596 case ELS_FARP_REQ:
1597 /* We do not support FARP.
1598 So, silently discard it */
1599 DPRINTK1("ELS_FARP_REQ received from D_ID 0x%x", s_id);
1600 break;
1601 case ELS_ABTX:
1602 DPRINTK1("ELS_ABTX received from D_ID 0x%x", s_id);
1603 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1604 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1605 else
1606 tx_logo(fi, s_id, received_ox_id);
1607 break;
1608 case ELS_FLOGI:
1609 DPRINTK1("ELS_FLOGI received from D_ID 0x%x", s_id);
1610 if (fi->g.ptp_up == TRUE) {
1611 /* The node could have come up as an N_Port
1612 * in a Loop! So, try initializing as an NL_Port. */
1614 take_tachyon_offline(fi);
1615 /* write AL_TIME & E_D_TOV into the registers */
1616 writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
1617 writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);
1618 DPRINTK1("FLOGI received, TACHYON initializing as L_Port...\n");
1619 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
1621 else {
1622 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1623 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1624 else
1625 tx_logo(fi, s_id, received_ox_id);
1627 break;
1628 case ELS_ADVC:
1629 DPRINTK1("ELS_ADVC received from D_ID 0x%x", s_id);
1630 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1631 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1632 else
1633 tx_logo(fi, s_id, received_ox_id);
1634 break;
1635 case ELS_ECHO:
1636 DPRINTK1("ELS_ECHO received from D_ID 0x%x", s_id);
1637 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1638 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1639 else
1640 tx_logo(fi, s_id, received_ox_id);
1641 break;
1642 case ELS_ESTC:
1643 DPRINTK1("ELS_ESTC received from D_ID 0x%x", s_id);
1644 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1645 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1646 else
1647 tx_logo(fi, s_id, received_ox_id);
1648 break;
1649 case ELS_ESTS:
1650 DPRINTK1("ELS_ESTS received from D_ID 0x%x", s_id);
1651 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1652 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1653 else
1654 tx_logo(fi, s_id, received_ox_id);
1655 break;
1656 case ELS_RCS:
1657 DPRINTK1("ELS_RCS received from D_ID 0x%x", s_id);
1658 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1659 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1660 else
1661 tx_logo(fi, s_id, received_ox_id);
1662 break;
1663 case ELS_RES:
1664 DPRINTK1("ELS_RES received from D_ID 0x%x", s_id);
1665 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1666 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1667 else
1668 tx_logo(fi, s_id, received_ox_id);
1669 break;
1670 case ELS_RLS:
1671 DPRINTK1("ELS_RLS received from D_ID 0x%x", s_id);
1672 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1673 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1674 else
1675 tx_logo(fi, s_id, received_ox_id);
1676 break;
1677 case ELS_RRQ:
1678 DPRINTK1("ELS_RRQ received from D_ID 0x%x", s_id);
1679 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1680 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1681 else
1682 tx_logo(fi, s_id, received_ox_id);
1683 break;
1684 case ELS_RSS:
1685 DPRINTK1("ELS_RSS received from D_ID 0x%x", s_id);
1686 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1687 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1688 else
1689 tx_logo(fi, s_id, received_ox_id);
1690 break;
1691 case ELS_RTV:
1692 DPRINTK1("ELS_RTV received from D_ID 0x%x", s_id);
1693 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1694 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1695 else
1696 tx_logo(fi, s_id, received_ox_id);
1697 break;
1698 case ELS_RSI:
1699 DPRINTK1("ELS_RSI received from D_ID 0x%x", s_id);
1700 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1701 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1702 else
1703 tx_logo(fi, s_id, received_ox_id);
1704 break;
1705 case ELS_TEST:
1706 /* No reply sequence */
1707 DPRINTK1("ELS_TEST received from D_ID 0x%x", s_id);
1708 break;
1709 case ELS_RNC:
1710 DPRINTK1("ELS_RNC received from D_ID 0x%x", s_id);
1711 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1712 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1713 else
1714 tx_logo(fi, s_id, received_ox_id);
1715 break;
1716 case ELS_RVCS:
1717 DPRINTK1("ELS_RVCS received from D_ID 0x%x", s_id);
1718 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1719 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1720 else
1721 tx_logo(fi, s_id, received_ox_id);
1722 break;
1723 case ELS_TPLS:
1724 DPRINTK1("ELS_TPLS received from D_ID 0x%x", s_id);
1725 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1726 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1727 else
1728 tx_logo(fi, s_id, received_ox_id);
1729 break;
1730 case ELS_GAID:
1731 DPRINTK1("ELS_GAID received from D_ID 0x%x", s_id);
1732 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1733 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1734 else
1735 tx_logo(fi, s_id, received_ox_id);
1736 break;
1737 case ELS_FACT:
1738 DPRINTK1("ELS_FACT received from D_ID 0x%x", s_id);
1739 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1740 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1741 else
1742 tx_logo(fi, s_id, received_ox_id);
1743 break;
1744 case ELS_FAN:
1745 /* Hmmm... You don't support FAN ??? */
1746 DPRINTK1("ELS_FAN received from D_ID 0x%x", s_id);
1747 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1748 break;
1749 case ELS_FDACT:
1750 DPRINTK1("ELS_FDACT received from D_ID 0x%x", s_id);
1751 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1752 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1753 else
1754 tx_logo(fi, s_id, received_ox_id);
1755 break;
1756 case ELS_NACT:
1757 DPRINTK1("ELS_NACT received from D_ID 0x%x", s_id);
1758 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1759 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1760 else
1761 tx_logo(fi, s_id, received_ox_id);
1762 break;
1763 case ELS_NDACT:
1764 DPRINTK1("ELS_NDACT received from D_ID 0x%x", s_id);
1765 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1766 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1767 else
1768 tx_logo(fi, s_id, received_ox_id);
1769 break;
1770 case ELS_QoSR:
1771 DPRINTK1("ELS_QoSR received from D_ID 0x%x", s_id);
1772 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1773 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1774 else
1775 tx_logo(fi, s_id, received_ox_id);
1776 break;
1777 case ELS_FDISC:
1778 DPRINTK1("ELS_FDISC received from D_ID 0x%x", s_id);
1779 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1780 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1781 else
1782 tx_logo(fi, s_id, received_ox_id);
1783 break;
1784 default:
1785 DPRINTK1("ELS Frame %x received from D_ID 0x%x", class_of_frame, s_id);
1786 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1787 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1788 else
1789 tx_logo(fi, s_id, received_ox_id);
1790 break;
1792 break;
1793 case TYPE_FC_SERVICES:
1794 fs_cmnd_code = (ntohl(*(buff_addr + 10)) & 0xFFFF0000) >>16;
1795 switch(fs_cmnd_code) {
1796 case FCS_ACC:
1797 els_type = remove_from_ox_id_list(fi, received_ox_id);
1798 DPRINTK1("FCS_ACC received from D_ID 0x%x in response to %x", s_id, els_type);
1799 if (els_type == FCS_GP_ID4)
1800 explore_fabric(fi, buff_addr);
1801 break;
1802 case FCS_REJECT:
1803 DPRINTK1("FCS_REJECT received from D_ID 0x%x in response to %x", s_id, els_type);
1804 break;
1806 break;
1807 case TYPE_LLC_SNAP:
1808 rx_net_packet(fi, (u_char *)buff_addr, payload_size);
1809 break;
1810 default:
1811 T_MSG("Frame Type %x received from %x", type_of_frame, s_id);
1814 /* provide Tachyon with another set of buffers */
1815 if (offset == (NO_OF_ENTRIES - 1))
1816 update_SFSBQ_indx(fi);
1817 LEAVE("handle_SFS_interrupt");
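/* Frame Manager interrupt handler: reacts to the link state bits in the
 * FM status register -- Link Down (invalidate every cached login and
 * re-initialize as an L_Port), Link Up (start local port discovery,
 * fabric login or point-to-point FLOGI), elastic store errors, NOS/OLS,
 * loop state timeouts and Bad AL_PA events.
 */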
1820 static void handle_FM_interrupt(struct fc_info *fi)
1822 u_int fm_status;
1823 u_int tachyon_status;
1825 ENTER("handle_FM_interrupt");
1826 fm_status = readl(fi->t_r.ptr_fm_status_reg);
1827 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
1828 DPRINTK("FM_status = %x, Tachyon_status = %x", fm_status, tachyon_status);
1829 if (fm_status & LINK_DOWN) {
1830 T_MSG("Fibre Channel Link DOWN");
1831 fm_status = readl(fi->t_r.ptr_fm_status_reg);
1833 del_timer(&fi->explore_timer);
1834 del_timer(&fi->nport_timer);
1835 del_timer(&fi->lport_timer);
1836 del_timer(&fi->display_cache_timer);
1837 fi->g.link_up = FALSE;
1838 if (fi->g.ptp_up == TRUE)
1839 fi->g.n_port_try = FALSE;
1840 fi->g.ptp_up = FALSE;
1841 fi->g.port_discovery = FALSE;
1842 fi->g.explore_fabric = FALSE;
1843 fi->g.perform_adisc = FALSE;
1845 /* Log out all nodes */
1846 if (fi->node_info_list) {
1847 struct fc_node_info *temp_list = fi->node_info_list;
1848 while(temp_list) {
1849 temp_list->login = LOGIN_ATTEMPTED;
1850 temp_list = temp_list->next;
1852 fi->num_nodes = 0;
1855 if ((fi->g.n_port_try == FALSE) && (fi->g.dont_init == FALSE)){
1856 take_tachyon_offline(fi);
1857 /* write AL_TIME & E_D_TOV into the registers */
1858 writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
1860 if ((fi->g.fabric_present == TRUE) && (fi->g.loop_up == TRUE)) {
1861 u_int al_pa = fi->g.my_id & 0xFF;
1862 writel((al_pa << 24) | LOOP_INIT_FABRIC_ADDRESS | LOOP_INIT_PREVIOUS_ADDRESS, fi->t_r.ptr_fm_config_reg);
1864 else
1865 if (fi->g.loop_up == TRUE) {
1866 u_int al_pa = fi->g.my_id & 0xFF;
1867 writel((al_pa << 24) | LOOP_INIT_PREVIOUS_ADDRESS, fi->t_r.ptr_fm_config_reg);
1869 else
1870 writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);
1871 fi->g.loop_up = FALSE;
1872 DPRINTK1("In LDWN TACHYON initializing as L_Port...\n");
1873 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
1877 if (fm_status & NON_PARTICIPATING) {
1878 T_MSG("Did not acquire an AL_PA. I am not participating");
1880 else
1881 if ((fm_status & LINK_UP) && ((fm_status & LINK_DOWN) == 0)) {
1882 T_MSG("Fibre Channel Link UP");
1883 if ((fm_status & NON_PARTICIPATING) != TRUE) {
1884 fi->g.link_up = TRUE;
1885 if (tachyon_status & OSM_FROZEN) {
1886 reset_tachyon(fi, ERROR_RELEASE);
1887 reset_tachyon(fi, OCQ_RESET);
1889 init_timer(&fi->explore_timer);
1890 init_timer(&fi->nport_timer);
1891 init_timer(&fi->lport_timer);
1892 init_timer(&fi->display_cache_timer);
1893 if ((fm_status & OLD_PORT) == 0) {
1894 fi->g.loop_up = TRUE;
1895 fi->g.ptp_up = FALSE;
1896 fi->g.my_id = readl(fi->t_r.ptr_fm_config_reg) >> 24;
1897 DPRINTK1("My AL_PA = %x", fi->g.my_id);
1898 fi->g.port_discovery = TRUE;
1899 fi->g.explore_fabric = FALSE;
1901 else
1902 if (((fm_status & 0xF0) == OLD_PORT) && ((fm_status & 0x0F) == PORT_STATE_ACTIVE)) {
1903 fi->g.loop_up = FALSE;
1904 fi->g.my_id = 0x0;
1905 /* In a point-to-point configuration, we expect to be
1906 * connected to an F_Port. This driver does not yet support
1907 * a configuration where it is connected to another N_Port
1908 * directly. */
1910 fi->g.explore_fabric = TRUE;
1911 fi->g.port_discovery = FALSE;
1912 if (fi->g.n_port_try == FALSE) {
1913 take_tachyon_offline(fi);
1914 /* write R_T_TOV & E_D_TOV into the registers */
1915 writel(PTP_TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
1916 writel(BB_CREDIT | NPORT, fi->t_r.ptr_fm_config_reg);
1917 fi->g.n_port_try = TRUE;
1918 DPRINTK1("In LUP TACHYON initializing as N_Port...\n");
1919 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
1921 else {
1922 fi->g.ptp_up = TRUE;
1923 tx_logi(fi, ELS_FLOGI, F_PORT);
1926 fi->g.my_ddaa = 0x0;
1927 fi->g.fabric_present = FALSE;
1928 /* We haven't sent out any Name Server Reqs */
1929 fi->g.name_server = FALSE;
1930 fi->g.alpa_list_index = 0;
1931 fi->g.ox_id = NOT_SCSI_XID;
1932 fi->g.my_mtu = FRAME_SIZE;
1934 /* Implicitly LOGO with all logged-in nodes. */
1936 if (fi->node_info_list) {
1937 struct fc_node_info *temp_list = fi->node_info_list;
1938 while(temp_list) {
1939 temp_list->login = LOGIN_ATTEMPTED;
1940 temp_list = temp_list->next;
1942 fi->num_nodes = 0;
1943 fi->g.perform_adisc = TRUE;
1944 //fi->g.perform_adisc = FALSE;
1945 fi->g.port_discovery = FALSE;
1946 tx_logi(fi, ELS_FLOGI, F_PORT);
1948 else {
1949 /* If the Link is coming up for the _first_ time, or no nodes
1950 * were logged in before... */
1952 fi->g.scsi_oxid = 0;
1953 fi->g.seq_id = 0x00;
1954 fi->g.perform_adisc = FALSE;
1957 /* reset OX_ID table */
1958 while (fi->ox_id_list) {
1959 struct ox_id_els_map *temp = fi->ox_id_list;
1960 fi->ox_id_list = fi->ox_id_list->next;
1961 kfree(temp);
1963 fi->ox_id_list = NULL;
1964 } /* End of if partipating */
1967 if (fm_status & ELASTIC_STORE_ERROR) {
1968 /* Too much junk on the Link. */
1970 /* Try to clear it up by Txing a PLOGI to ourselves. */
1971 if (fi->g.link_up == TRUE)
1972 tx_logi(fi, ELS_PLOGI, fi->g.my_id);
1975 if (fm_status & LOOP_UP) {
1976 if (tachyon_status & OSM_FROZEN) {
1977 reset_tachyon(fi, ERROR_RELEASE);
1978 reset_tachyon(fi, OCQ_RESET);
1982 if (fm_status & NOS_OLS_RECEIVED){
1983 if (fi->g.nport_timer_set == FALSE) {
1984 DPRINTK("NOS/OLS Received");
1985 DPRINTK("FM_status = %x", fm_status);
1986 fi->nport_timer.function = nos_ols_timer;
1987 fi->nport_timer.data = (unsigned long)fi;
1988 fi->nport_timer.expires = RUN_AT((3*HZ)/100); /* 30 msec */
1989 init_timer(&fi->nport_timer);
1990 add_timer(&fi->nport_timer);
1991 fi->g.nport_timer_set = TRUE;
1995 if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_LF1) || ((fm_status & 0x0F) == PORT_STATE_LF2))) {
1996 DPRINTK1("Link Fail-I in OLD-PORT.");
1997 take_tachyon_offline(fi);
1998 reset_tachyon(fi, SOFTWARE_RESET);
2001 if (fm_status & LOOP_STATE_TIMEOUT){
2002 if ((fm_status & 0xF0) == ARBITRATING)
2003 DPRINTK1("ED_TOV timed out in ARBITRATING state...");
2004 if ((fm_status & 0xF0) == ARB_WON)
2005 DPRINTK1("ED_TOV timed out in ARBITRATION WON state...");
2006 if ((fm_status & 0xF0) == OPEN)
2007 DPRINTK1("ED_TOV timed out in OPEN state...");
2008 if ((fm_status & 0xF0) == OPENED)
2009 DPRINTK1("ED_TOV timed out in OPENED state...");
2010 if ((fm_status & 0xF0) == TX_CLS)
2011 DPRINTK1("ED_TOV timed out in XMITTED CLOSE state...");
2012 if ((fm_status & 0xF0) == RX_CLS)
2013 DPRINTK1("ED_TOV timed out in RECEIVED CLOSE state...");
2014 if ((fm_status & 0xF0) == INITIALIZING)
2015 DPRINTK1("ED_TOV timed out in INITIALIZING state...");
2016 DPRINTK1("Initializing Loop...");
2017 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
2020 if ((fm_status & BAD_ALPA) && (fi->g.loop_up == TRUE)) {
2021 u_char bad_alpa = (readl(fi->t_r.ptr_fm_rx_al_pa_reg) & 0xFF00) >> 8;
2022 if (tachyon_status & OSM_FROZEN) {
2023 reset_tachyon(fi, ERROR_RELEASE);
2024 reset_tachyon(fi, OCQ_RESET);
2026 /* Fix for B34 */
2027 tx_logi(fi, ELS_PLOGI, fi->g.my_id);
2029 if (!fi->g.port_discovery && !fi->g.perform_adisc) {
2030 if (bad_alpa != 0xFE)
2031 DPRINTK("Bad AL_PA = %x", bad_alpa);
2033 else {
2034 if ((fi->g.perform_adisc == TRUE) && (bad_alpa == 0x00)) {
2035 DPRINTK1("Performing ADISC...");
2036 fi->g.fabric_present = FALSE;
2037 perform_adisc(fi);
2042 if (fm_status & LIPF_RECEIVED){
2043 DPRINTK("LIP(F8) Received");
2046 if (fm_status & LINK_FAILURE) {
2047 if (fm_status & LOSS_OF_SIGNAL)
2048 DPRINTK1("Detected Loss of Signal.");
2049 if (fm_status & OUT_OF_SYNC)
2050 DPRINTK1("Detected Loss of Synchronization.");
2053 if (fm_status & TRANSMIT_PARITY_ERROR) {
2054 /* Bad! Should not happen. Solution: Hard Reset. */
2056 T_MSG("Parity Error. Perform Hard Reset!");
2059 if (fi->g.alpa_list_index >= MAX_NODES){
2060 if (fi->g.port_discovery == TRUE) {
2061 fi->g.port_discovery = FALSE;
2062 add_display_cache_timer(fi);
2064 fi->g.alpa_list_index = MAX_NODES;
2067 if (fi->g.port_discovery == TRUE)
2068 local_port_discovery(fi);
2070 LEAVE("handle_FM_interrupt");
2071 return;
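/* Walk the well-known AL_PA list and send a PLOGI to the next loop
 * address that we are not already logged in to (our own AL_PA is
 * skipped); the very first call sends a FLOGI if no fabric has been
 * seen yet. Discovery stops once alpa_list_index reaches MAX_NODES.
 */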
2074 static void local_port_discovery(struct fc_info *fi)
2076 if (fi->g.loop_up == TRUE) {
2077 /* If this is not here, some of the Bad AL_PAs are missed. */
2079 udelay(20);
2080 if ((fi->g.alpa_list_index == 0) && (fi->g.fabric_present == FALSE)){
2081 tx_logi(fi, ELS_FLOGI, F_PORT);
2083 else {
2084 int login_state = sid_logged_in(fi, fi->g.my_ddaa | alpa_list[fi->g.alpa_list_index]);
2085 while ((fi->g.alpa_list_index == 0) || ((fi->g.alpa_list_index < MAX_NODES) && ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN) || (alpa_list[fi->g.alpa_list_index] == (fi->g.my_id & 0xFF)))))
2086 fi->g.alpa_list_index++;
2087 if (fi->g.alpa_list_index < MAX_NODES)
2088 tx_logi(fi, ELS_PLOGI, alpa_list[fi->g.alpa_list_index]);
2090 fi->g.alpa_list_index++;
2091 if (fi->g.alpa_list_index >= MAX_NODES){
2092 if (fi->g.port_discovery == TRUE) {
2093 fi->g.port_discovery = FALSE;
2094 add_display_cache_timer(fi);
2096 fi->g.alpa_list_index = MAX_NODES;
2101 static void nos_ols_timer(unsigned long data)
2103 struct fc_info *fi = (struct fc_info*)data;
2104 u_int fm_status;
2105 fm_status = readl(fi->t_r.ptr_fm_status_reg);
2106 DPRINTK1("FM_status in timer= %x", fm_status);
2107 fi->g.nport_timer_set = FALSE;
2108 del_timer(&fi->nport_timer);
2109 if ((fi->g.ptp_up == TRUE) || (fi->g.loop_up == TRUE))
2110 return;
2111 if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_ACTIVE) || ((fm_status & 0x0F) == PORT_STATE_OFFLINE))) {
2112 DPRINTK1("In OLD-PORT after E_D_TOV.");
2113 take_tachyon_offline(fi);
2114 /* write R_T_TOV & E_D_TOV into the registers */
2115 writel(PTP_TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
2116 writel(BB_CREDIT | NPORT, fi->t_r.ptr_fm_config_reg);
2117 fi->g.n_port_try = TRUE;
2118 DPRINTK1("In timer, TACHYON initializing as N_Port...\n");
2119 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
2121 else
2122 if ((fi->g.lport_timer_set == FALSE) && ((fm_status & 0xF0) == LOOP_FAIL)) {
2123 DPRINTK1("Loop Fail after E_D_TOV.");
2124 fi->lport_timer.function = loop_timer;
2125 fi->lport_timer.data = (unsigned long)fi;
2126 fi->lport_timer.expires = RUN_AT((8*HZ)/100);
2127 init_timer(&fi->lport_timer);
2128 add_timer(&fi->lport_timer);
2129 fi->g.lport_timer_set = TRUE;
2130 take_tachyon_offline(fi);
2131 reset_tachyon(fi, SOFTWARE_RESET);
2133 else
2134 if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_LF1) || ((fm_status & 0x0F) == PORT_STATE_LF2))) {
2135 DPRINTK1("Link Fail-II in OLD-PORT.");
2136 take_tachyon_offline(fi);
2137 reset_tachyon(fi, SOFTWARE_RESET);
2141 static void loop_timer(unsigned long data)
2143 struct fc_info *fi = (struct fc_info*)data;
2144 fi->g.lport_timer_set = FALSE;
2145 del_timer(&fi->lport_timer);
2146 if ((fi->g.ptp_up == TRUE) || (fi->g.loop_up == TRUE))
2147 return;
2150 static void add_display_cache_timer(struct fc_info *fi)
2152 fi->display_cache_timer.function = display_cache_timer;
2153 fi->display_cache_timer.data = (unsigned long)fi;
2154 fi->display_cache_timer.expires = RUN_AT(fi->num_nodes * HZ);
2155 init_timer(&fi->display_cache_timer);
2156 add_timer(&fi->display_cache_timer);
2159 static void display_cache_timer(unsigned long data)
2161 struct fc_info *fi = (struct fc_info*)data;
2162 del_timer(&fi->display_cache_timer);
2163 display_cache(fi);
2164 return;
2167 static void reset_tachyon(struct fc_info *fi, u_int value)
2169 u_int tachyon_status, reset_done = OCQ_RESET_STATUS | SCSI_FREEZE_STATUS;
2170 int not_done = 1, i = 0;
2171 writel(value, fi->t_r.ptr_tach_control_reg);
2172 if (value == OCQ_RESET)
2173 fi->q.ocq_prod_indx = 0;
2174 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
2176 /* Software resets complete immediately, whereas the others don't. It
2177 takes about 30 clocks to do the reset */
2178 if (value != SOFTWARE_RESET) {
2179 while(not_done) {
2180 if (i++ > 100000) {
2181 T_MSG("Reset was unsuccessful! Tachyon Status = %x", tachyon_status);
2182 break;
2184 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
2185 if ((tachyon_status & reset_done) == 0)
2186 not_done = 0;
2189 else {
2190 write_to_tachyon_registers(fi);
2194 static void take_tachyon_offline(struct fc_info *fi)
2196 u_int fm_status = readl(fi->t_r.ptr_fm_status_reg);
2198 /* The first two conditions will never be true. The Manual and
2199 * the errata say this. But the current implementation is
2200 * decently stable. */
2202 //if ((fm_status & 0xF0) == LOOP_FAIL) {
2203 if (fm_status == LOOP_FAIL) {
2204 // workaround as in P. 89
2205 writel(HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
2206 if (fi->g.loop_up == TRUE)
2207 writel(SOFTWARE_RESET, fi->t_r.ptr_tach_control_reg);
2208 else {
2209 writel(OFFLINE, fi->t_r.ptr_fm_control_reg);
2210 writel(EXIT_HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
2213 else
2214 //if ((fm_status & LOOP_UP) == LOOP_UP) {
2215 if (fm_status == LOOP_UP) {
2216 writel(SOFTWARE_RESET, fi->t_r.ptr_tach_control_reg);
2218 else
2219 writel(OFFLINE, fi->t_r.ptr_fm_control_reg);
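/* Read the Node Name and Port Name (WWNs) out of the card's NOVRAM;
 * Interphase-branded boards keep them at an offset of 32 entries into
 * the NOVRAM data.
 */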
2223 static void read_novram(struct fc_info *fi)
2225 int off = 0;
2226 fi->n_r.ptr_novram_hw_control_reg = fi->i_r.ptr_ichip_hw_control_reg;
2227 fi->n_r.ptr_novram_hw_status_reg = fi->i_r.ptr_ichip_hw_status_reg;
2228 iph5526_nr_do_init(fi);
2229 if (fi->clone_id == PCI_VENDOR_ID_INTERPHASE)
2230 off = 32;
2232 fi->g.my_node_name_high = (fi->n_r.data[off] << 16) | fi->n_r.data[off+1];
2233 fi->g.my_node_name_low = (fi->n_r.data[off+2] << 16) | fi->n_r.data[off+3];
2234 fi->g.my_port_name_high = (fi->n_r.data[off+4] << 16) | fi->n_r.data[off+5];
2235 fi->g.my_port_name_low = (fi->n_r.data[off+6] << 16) | fi->n_r.data[off+7];
2236 DPRINTK("node_name = %x %x", fi->g.my_node_name_high, fi->g.my_node_name_low);
2237 DPRINTK("port_name = %x %x", fi->g.my_port_name_high, fi->g.my_port_name_low);
2240 static void reset_ichip(struct fc_info *fi)
2242 /* (i)chip reset */
2243 writel(ICHIP_HCR_RESET, fi->i_r.ptr_ichip_hw_control_reg);
2244 /*wait for chip to get reset */
2245 udelay(10000);
2246 /*de-assert reset */
2247 writel(ICHIP_HCR_DERESET, fi->i_r.ptr_ichip_hw_control_reg);
2249 /* enable INT lines on the (i)chip */
2250 writel(ICHIP_HCR_ENABLE_INTA , fi->i_r.ptr_ichip_hw_control_reg);
2251 /* enable byte swap */
2252 writel(ICHIP_HAMR_BYTE_SWAP_ADDR_TR, fi->i_r.ptr_ichip_hw_addr_mask_reg);
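/* The ELS transmit helpers below share a small ring of pre-built payload
 * buffers (els_buffer[], indexed by g.e_i and wrapping at
 * MAX_PENDING_FRAMES): each one fills in its payload, copies it into the
 * current ring slot and queues it with tx_exchange().
 */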
2255 static void tx_logi(struct fc_info *fi, u_int logi, u_int d_id)
2257 int int_required = 1;
2258 u_short ox_id = OX_ID_FIRST_SEQUENCE;
2259 u_int r_ctl = RCTL_ELS_UCTL;
2260 u_int type = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2261 u_int my_mtu = fi->g.my_mtu;
2262 ENTER("tx_logi");
2263 /* We don't want an interrupt for our own login.
2264 * It screws up the port discovery process. */
2266 if (d_id == fi->g.my_id)
2267 int_required = 0;
2268 fill_login_frame(fi, logi);
2269 fi->g.type_of_frame = FC_ELS;
2270 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.login, sizeof(LOGIN));
2271 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),sizeof(LOGIN), r_ctl, type, d_id, my_mtu, int_required, ox_id, logi);
2272 fi->g.e_i++;
2273 if (fi->g.e_i == MAX_PENDING_FRAMES)
2274 fi->g.e_i = 0;
2275 LEAVE("tx_logi");
2276 return;
2279 static void tx_logi_acc(struct fc_info *fi, u_int logi, u_int d_id, u_short received_ox_id)
2281 int int_required = 0;
2282 u_int r_ctl = RCTL_ELS_SCTL;
2283 u_int type = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2284 u_int my_mtu = fi->g.my_mtu;
2285 ENTER("tx_logi_acc");
2286 fill_login_frame(fi, logi);
2287 fi->g.type_of_frame = FC_ELS;
2288 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.login, sizeof(LOGIN));
2289 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),sizeof(LOGIN), r_ctl, type, d_id, my_mtu, int_required, received_ox_id, logi);
2290 fi->g.e_i++;
2291 if (fi->g.e_i == MAX_PENDING_FRAMES)
2292 fi->g.e_i = 0;
2293 LEAVE("tx_logi_acc");
2294 return;
2297 static void tx_prli(struct fc_info *fi, u_int command_code, u_int d_id, u_short received_ox_id)
2299 int int_required = 1;
2300 u_int r_ctl = RCTL_ELS_UCTL;
2301 u_int type = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2302 u_int my_mtu = fi->g.my_mtu;
2303 ENTER("tx_prli");
2304 if (command_code == ELS_PRLI)
2305 fi->g.prli.cmnd_code = htons((ELS_PRLI | PAGE_LEN) >> 16);
2306 else {
2307 fi->g.prli.cmnd_code = htons((ELS_ACC | PAGE_LEN) >> 16);
2308 int_required = 0;
2309 type = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2310 r_ctl = RCTL_ELS_SCTL;
2312 fi->g.prli.payload_length = htons(PRLI_LEN);
2313 fi->g.prli.type_code = htons(FCP_TYPE_CODE);
2314 fi->g.prli.est_image_pair = htons(IMAGE_PAIR);
2315 fi->g.prli.responder_pa = 0;
2316 fi->g.prli.originator_pa = 0;
2317 fi->g.prli.service_params = htonl(INITIATOR_FUNC | READ_XFER_RDY_DISABLED);
2318 fi->g.type_of_frame = FC_ELS;
2319 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.prli, sizeof(PRLI));
2320 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]), sizeof(PRLI), r_ctl, type, d_id, my_mtu, int_required, received_ox_id, command_code);
2321 fi->g.e_i++;
2322 if (fi->g.e_i == MAX_PENDING_FRAMES)
2323 fi->g.e_i = 0;
2324 LEAVE("tx_prli");
2325 return;
2328 static void tx_logo(struct fc_info *fi, u_int d_id, u_short received_ox_id)
2330 int int_required = 1;
2331 u_int r_ctl = RCTL_ELS_UCTL;
2332 u_int type = TYPE_ELS | EXCHANGE_RESPONDER | SEQUENCE_RESPONDER | FIRST_SEQUENCE | END_SEQUENCE | SEQUENCE_INITIATIVE;
2333 int size = sizeof(LOGO);
2334 char fc_id[3];
2335 u_int my_mtu = fi->g.my_mtu;
2336 ENTER("tx_logo");
2337 fi->g.logo.logo_cmnd = htonl(ELS_LOGO);
2338 fi->g.logo.reserved = 0;
2339 memcpy(fc_id, &(fi->g.my_id), 3);
2340 fi->g.logo.n_port_id_0 = fc_id[0];
2341 fi->g.logo.n_port_id_1 = fc_id[1];
2342 fi->g.logo.n_port_id_2 = fc_id[2];
2343 fi->g.logo.port_name_up = htonl(N_PORT_NAME_HIGH);
2344 fi->g.logo.port_name_low = htonl(N_PORT_NAME_LOW);
2345 fi->g.type_of_frame = FC_ELS;
2346 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.logo, sizeof(LOGO));
2347 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, ELS_LOGO);
2348 fi->g.e_i++;
2349 if (fi->g.e_i == MAX_PENDING_FRAMES)
2350 fi->g.e_i = 0;
2351 LEAVE("tx_logo");
2354 static void tx_adisc(struct fc_info *fi, u_int cmnd_code, u_int d_id, u_short received_ox_id)
2356 int int_required = 0;
2357 u_int r_ctl = RCTL_ELS_SCTL;
2358 u_int type = TYPE_ELS | EXCHANGE_RESPONDER | SEQUENCE_RESPONDER | FIRST_SEQUENCE | END_SEQUENCE;
2359 int size = sizeof(ADISC);
2360 u_int my_mtu = fi->g.my_mtu;
2361 fi->g.adisc.ls_cmnd_code = htonl(cmnd_code);
2362 fi->g.adisc.hard_address = htonl(0);
2363 fi->g.adisc.port_name_high = htonl(N_PORT_NAME_HIGH);
2364 fi->g.adisc.port_name_low = htonl(N_PORT_NAME_LOW);
2365 fi->g.adisc.node_name_high = htonl(NODE_NAME_HIGH);
2366 fi->g.adisc.node_name_low = htonl(NODE_NAME_LOW);
2367 fi->g.adisc.n_port_id = htonl(fi->g.my_id);
2368 if (cmnd_code == ELS_ADISC) {
2369 int_required = 1;
2370 r_ctl = RCTL_ELS_UCTL;
2371 type = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2373 fi->g.type_of_frame = FC_ELS;
2374 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.adisc, size);
2375 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, cmnd_code);
2376 fi->g.e_i++;
2377 if (fi->g.e_i == MAX_PENDING_FRAMES)
2378 fi->g.e_i = 0;
2381 static void tx_ls_rjt(struct fc_info *fi, u_int d_id, u_short received_ox_id, u_short reason_code, u_short expln_code)
2383 int int_required = 0;
2384 u_int r_ctl = RCTL_ELS_SCTL;
2385 u_int type = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2386 int size = sizeof(LS_RJT);
2387 u_int my_mtu = fi->g.my_mtu;
2388 ENTER("tx_ls_rjt");
2389 fi->g.ls_rjt.cmnd_code = htonl(ELS_LS_RJT);
2390 fi->g.ls_rjt.reason_code = htonl((reason_code << 16) | expln_code);
2391 fi->g.type_of_frame = FC_ELS;
2392 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.ls_rjt, size);
2393 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, ELS_LS_RJT);
2394 fi->g.e_i++;
2395 if (fi->g.e_i == MAX_PENDING_FRAMES)
2396 fi->g.e_i = 0;
2397 LEAVE("tx_ls_rjt");
2400 static void tx_abts(struct fc_info *fi, u_int d_id, u_short ox_id)
2402 int int_required = 1;
2403 u_int r_ctl = RCTL_BASIC_ABTS;
2404 u_int type = TYPE_BLS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2405 int size = 0;
2406 u_int my_mtu = fi->g.my_mtu;
2407 ENTER("tx_abts");
2408 fi->g.type_of_frame = FC_BLS;
2409 tx_exchange(fi, NULL, size, r_ctl, type, d_id, my_mtu, int_required, ox_id, RCTL_BASIC_ABTS);
2410 LEAVE("tx_abts");
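/* Sanity-check the service parameters of a received PLOGI payload:
 * Class 3 must be marked valid, the advertised receive data field size
 * must be at least 256 bytes, the payload must have the expected length,
 * and the concurrent/open sequence counts must be non-zero. Returns 0
 * when the login is acceptable, otherwise a combined
 * (reason code << 16 | explanation) value describing why it was rejected.
 */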
2413 static u_int plogi_ok(struct fc_info *fi, u_int *buff_addr, int size)
2415 int ret_code = 0;
2416 u_short mtu = ntohl(*(buff_addr + 10)) & 0x00000FFF;
2417 u_short class3 = ntohl(*(buff_addr + 25)) >> 16;
2418 u_short class3_conc_seq = ntohl(*(buff_addr + 27)) >> 16;
2419 u_short open_seq = ntohl(*(buff_addr + 28)) >> 16;
2420 DPRINTK1("mtu = %x class3 = %x conc_seq = %x open_seq = %x", mtu, class3, class3_conc_seq, open_seq);
2421 size -= TACHYON_HEADER_LEN;
2422 if (!(class3 & 0x8000)) {
2423 DPRINTK1("Received PLOGI with class3 = %x", class3);
2424 ret_code = (LOGICAL_ERR << 16) | NO_EXPLN;
2425 return ret_code;
2427 if (mtu < 256) {
2428 DPRINTK1("Received PLOGI with MTU set to %x", mtu);
2429 ret_code = (LOGICAL_ERR << 16) | RECV_FIELD_SIZE;
2430 return ret_code;
2432 if (size != PLOGI_LEN) {
2433 DPRINTK1("Received PLOGI of size %x", size);
2434 ret_code = (LOGICAL_ERR << 16) | INV_PAYLOAD_LEN;
2435 return ret_code;
2437 if (class3_conc_seq == 0) {
2438 DPRINTK1("Received PLOGI with conc_seq == 0");
2439 ret_code = (LOGICAL_ERR << 16) | CONC_SEQ;
2440 return ret_code;
2442 if (open_seq == 0) {
2443 DPRINTK1("Received PLOGI with open_seq == 0");
2444 ret_code = (LOGICAL_ERR << 16) | NO_EXPLN;
2445 return ret_code;
2448 /* Could potentially check for more fields, but might end up
2449 not talking to most of the devices. ;-) */
2450 /* Things that could get checked are:
2451 common_features = 0x8800
2452 total_concurrent_seq = at least 1 */
2454 return ret_code;
2457 static void tx_acc(struct fc_info *fi, u_int d_id, u_short received_ox_id)
2459 int int_required = 0;
2460 u_int r_ctl = RCTL_ELS_SCTL;
2461 u_int type = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2462 int size = sizeof(ACC);
2463 u_int my_mtu = fi->g.my_mtu;
2464 ENTER("tx_acc");
2465 fi->g.acc.cmnd_code = htonl(ELS_ACC);
2466 fi->g.type_of_frame = FC_ELS;
2467 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.acc, size);
2468 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, ELS_ACC);
2469 fi->g.e_i++;
2470 if (fi->g.e_i == MAX_PENDING_FRAMES)
2471 fi->g.e_i = 0;
2472 LEAVE("tx_acc");
2476 static void tx_name_server_req(struct fc_info *fi, u_int req)
2478 int int_required = 1, i, size = 0;
2479 u_short ox_id = OX_ID_FIRST_SEQUENCE;
2480 u_int type = TYPE_FC_SERVICES | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2481 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_CONTROL;
2482 u_int my_mtu = fi->g.my_mtu, d_id = DIRECTORY_SERVER;
2483 CT_HDR ct_hdr;
2484 ENTER("tx_name_server_req");
2485 /* Fill up CT_Header */
2486 ct_hdr.rev_in_id = htonl(FC_CT_REV);
2487 ct_hdr.fs_type = DIRECTORY_SERVER_APP;
2488 ct_hdr.fs_subtype = NAME_SERVICE;
2489 ct_hdr.options = 0;
2490 ct_hdr.resv1 = 0;
2491 ct_hdr.cmnd_resp_code = htons(req >> 16);
2492 ct_hdr.max_res_size = 0;
2493 ct_hdr.resv2 = 0;
2494 ct_hdr.reason_code = 0;
2495 ct_hdr.expln_code = 0;
2496 ct_hdr.vendor_unique = 0;
2498 fi->g.type_of_frame = FC_ELS;
2499 switch(req) {
2500 case FCS_RFC_4:
2501 memcpy(&(fi->g.rfc_4.ct_hdr), &ct_hdr, sizeof(CT_HDR));
2502 fi->g.rfc_4.s_id = htonl(fi->g.my_id);
2503 for (i = 0; i < 32; i++)
2504 fi->g.rfc_4.bit_map[i] = 0;
2505 /* We support IP & SCSI */
2506 fi->g.rfc_4.bit_map[2] = 0x01;
2507 fi->g.rfc_4.bit_map[3] = 0x20;
2508 size = sizeof(RFC_4);
2509 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.rfc_4, size);
2510 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, ox_id, req);
2511 break;
2512 case FCS_GP_ID4:
2513 memcpy(&(fi->g.gp_id4.ct_hdr), &ct_hdr, sizeof(CT_HDR));
2514 fi->g.gp_id4.port_type = htonl(PORT_TYPE_NX_PORTS);
2515 size = sizeof(GP_ID4);
2516 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.gp_id4, size);
2517 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, ox_id, req);
2518 break;
2520 fi->g.e_i++;
2521 if (fi->g.e_i == MAX_PENDING_FRAMES)
2522 fi->g.e_i = 0;
2523 LEAVE("tx_name_server_req");
2526 static void tx_scr(struct fc_info *fi)
2528 int int_required = 1, size = sizeof(SCR);
2529 u_short ox_id = OX_ID_FIRST_SEQUENCE;
2530 u_int type = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2531 u_int r_ctl = RCTL_ELS_UCTL;
2532 u_int my_mtu = fi->g.my_mtu, d_id = FABRIC_CONTROLLER;
2533 ENTER("tx_scr");
2534 fi->g.scr.cmnd_code = htonl(ELS_SCR);
2535 fi->g.scr.reg_function = htonl(FULL_REGISTRATION);
2536 fi->g.type_of_frame = FC_ELS;
2537 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.scr, size);
2538 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, ox_id, ELS_SCR);
2539 fi->g.e_i++;
2540 if (fi->g.e_i == MAX_PENDING_FRAMES)
2541 fi->g.e_i = 0;
2542 LEAVE("tx_scr");
2545 static void perform_adisc(struct fc_info *fi)
2547 int count = 0;
2548 /* Will be set to TRUE when timer expires in a PLDA environment. */
2550 fi->g.port_discovery = FALSE;
2552 if (fi->node_info_list) {
2553 struct fc_node_info *temp_list = fi->node_info_list;
2554 while(temp_list) {
2555 /* Tx ADISC to all non-fabric based
2556 * entities. */
2558 if ((temp_list->d_id & 0xFF0000) != 0xFF0000)
2559 tx_adisc(fi, ELS_ADISC, temp_list->d_id, OX_ID_FIRST_SEQUENCE);
2560 temp_list = temp_list->next;
2561 udelay(20);
2562 count++;
2565 /* Perform Port Discovery after the timer expires.
2566 * We are giving time for the ADISCed nodes to respond
2567 * so that we don't have to perform PLOGI to those whose
2568 * logins are _still_ valid. */
2570 fi->explore_timer.function = port_discovery_timer;
2571 fi->explore_timer.data = (unsigned long)fi;
2572 fi->explore_timer.expires = RUN_AT((count*3*HZ)/100);
2573 init_timer(&fi->explore_timer);
2574 add_timer(&fi->explore_timer);
2577 static void explore_fabric(struct fc_info *fi, u_int *buff_addr)
2579 u_int *addr = buff_addr + 12; /* index into payload */
2580 u_char control_code;
2581 u_int d_id;
2582 int count = 0;
2583 ENTER("explore_fabric");
2584 DPRINTK1("entering explore_fabric");
2586 /*fi->g.perform_adisc = TRUE;
2587 fi->g.explore_fabric = TRUE;
2588 perform_adisc(fi);*/
2590 do {
2591 d_id = ntohl(*addr) & 0x00FFFFFF;
2592 if (d_id != fi->g.my_id) {
2593 if (sid_logged_in(fi, d_id) == NODE_NOT_PRESENT)
2594 tx_logi(fi, ELS_PLOGI, d_id);
2595 else
2596 if (sid_logged_in(fi, d_id) == NODE_LOGGED_OUT)
2597 tx_adisc(fi, ELS_ADISC, d_id, OX_ID_FIRST_SEQUENCE);
2598 count++;
2600 control_code = (ntohl(*addr) & 0xFF000000) >> 24;
2601 addr++;
2602 DPRINTK1("cc = %x, d_id = %x", control_code, d_id);
2603 } while (control_code != 0x80);
2605 fi->explore_timer.function = fabric_explore_timer;
2606 fi->explore_timer.data = (unsigned long)fi;
2607 /* We give 30 msec for each device to respond and then send out
2608 * our SCSI enquiries. */
2610 fi->explore_timer.expires = RUN_AT((count*3*HZ)/100);
2611 init_timer(&fi->explore_timer);
2612 add_timer(&fi->explore_timer);
2614 DPRINTK1("leaving explore_fabric");
2615 LEAVE("explore_fabric");
2618 static void fabric_explore_timer(unsigned long data)
2620 struct fc_info *fi = (struct fc_info*)data;
2621 del_timer(&fi->explore_timer);
2623 if ((fi->g.loop_up == TRUE) && (fi->g.ptp_up == FALSE)) {
2624 /* Initiate Local Port Discovery on the Local Loop. */
2626 fi->g.port_discovery = TRUE;
2627 fi->g.alpa_list_index = 1;
2628 local_port_discovery(fi);
2630 fi->g.explore_fabric = FALSE;
2631 return;
2634 static void port_discovery_timer(unsigned long data)
2636 struct fc_info *fi = (struct fc_info*)data;
2637 del_timer(&fi->explore_timer);
2639 if ((fi->g.loop_up == TRUE) && (fi->g.explore_fabric != TRUE)) {
2640 fi->g.port_discovery = TRUE;
2641 fi->g.alpa_list_index = 1;
2642 local_port_discovery(fi);
2644 fi->g.perform_adisc = FALSE;
2645 return;
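/* ox_id_list is a singly linked list that remembers which ELS command was
 * sent on each outstanding OX_ID, so that when an ACC, LS_RJT or LOGO
 * comes back we can tell which request it answers. Entries are appended
 * here and removed (and freed) in remove_from_ox_id_list().
 */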
2648 static void add_to_ox_id_list(struct fc_info *fi, u_int transaction_id, u_int cmnd_code)
2650 struct ox_id_els_map *p, *q = fi->ox_id_list, *r = NULL;
2651 int size = sizeof(struct ox_id_els_map);
2652 while (q != NULL) {
2653 r = q;
2654 q = q->next;
2656 p = (struct ox_id_els_map *)kmalloc(size, GFP_ATOMIC);
2657 if (p == NULL) {
2658 T_MSG("kmalloc failed in add_to_ox_id_list()");
2659 return;
2661 p->ox_id = transaction_id;
2662 p->els = cmnd_code;
2663 p->next = NULL;
2664 if (fi->ox_id_list == NULL)
2665 fi->ox_id_list = p;
2666 else
2667 r->next = p;
2668 return;
2671 static u_int remove_from_ox_id_list(struct fc_info *fi, u_short received_ox_id)
2673 struct ox_id_els_map *p = fi->ox_id_list, *q = fi->ox_id_list;
2674 u_int els_type;
2675 while (q != NULL) {
2676 if (q->ox_id == received_ox_id) {
2678 if (q == fi->ox_id_list)
2679 fi->ox_id_list = fi->ox_id_list->next;
2680 else
2681 if (q->next == NULL)
2682 p->next = NULL;
2683 else
2684 p->next = q->next;
2686 els_type = q->els;
2687 kfree(q);
2688 return els_type;
2690 p = q;
2691 q = q->next;
2693 if (q == NULL)
2694 DPRINTK2("Could not find ox_id %x in ox_id_els_map", received_ox_id);
2695 return 0;
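/* Fill in the Tachyon outbound frame header: SOF/EOF delimiters,
 * destination AL_PA (forced to 0 when the frame has to go through the
 * fabric), R_CTL/D_ID, S_ID, TYPE/F_CTL, SEQ_ID, OX_ID/RX_ID and, for
 * IP/ARP frames, the FC network header built from the supplied
 * destination and source addresses.
 */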
2698 static void build_tachyon_header(struct fc_info *fi, u_int my_id, u_int r_ctl, u_int d_id, u_int type, u_char seq_id, u_char df_ctl, u_short ox_id, u_short rx_id, char *data)
2700 u_char alpa = d_id & 0x0000FF;
2701 u_int dest_ddaa = d_id &0xFFFF00;
2703 ENTER("build_tachyon_header");
2704 DPRINTK("d_id = %x, my_ddaa = %x", d_id, fi->g.my_ddaa);
2705 /* Does it have to go to/thru a Fabric? */
2706 if ((dest_ddaa != 0) && ((d_id == F_PORT) || (fi->g.fabric_present && (dest_ddaa != fi->g.my_ddaa))))
2707 alpa = 0x00;
2708 fi->g.tach_header.resv = 0x00000000;
2709 fi->g.tach_header.sof_and_eof = SOFI3 | EOFN;
2710 fi->g.tach_header.dest_alpa = alpa;
2711 /* Set LCr properly to have enough credit */
2712 if (alpa == REPLICATE)
2713 fi->g.tach_header.lcr_and_time_stamp = htons(0xC00);/* LCr=3 */
2714 else
2715 fi->g.tach_header.lcr_and_time_stamp = 0;
2716 fi->g.tach_header.r_ctl_and_d_id = htonl(r_ctl | d_id);
2717 fi->g.tach_header.vc_id_and_s_id = htonl(my_id);
2718 fi->g.tach_header.type_and_f_cntl = htonl(type);
2719 fi->g.tach_header.seq_id = seq_id;
2720 fi->g.tach_header.df_cntl = df_ctl;
2721 fi->g.tach_header.seq_cnt = 0;
2722 fi->g.tach_header.ox_id = htons(ox_id);
2723 fi->g.tach_header.rx_id = htons(rx_id);
2724 fi->g.tach_header.ro = 0;
2725 if (data) {
2726 /* We use the Seq_Count to keep track of IP frames in the
2727 * OCI_interrupt handler. Initial Seq_Count of IP frames is 1. */
2729 if (fi->g.type_of_frame == FC_BROADCAST)
2730 fi->g.tach_header.seq_cnt = htons(0x1);
2731 else
2732 fi->g.tach_header.seq_cnt = htons(0x2);
2733 fi->g.tach_header.nw_header.d_naa = htons(0x1000);
2734 fi->g.tach_header.nw_header.s_naa = htons(0x1000);
2735 memcpy(&(fi->g.tach_header.nw_header.dest_high), data, 2);
2736 memcpy(&(fi->g.tach_header.nw_header.dest_low), data + 2, 4);
2737 memcpy(&(fi->g.tach_header.nw_header.source_high), data + 6, 2);
2738 memcpy(&(fi->g.tach_header.nw_header.source_low), data + 8, 4);
2740 LEAVE("build_tachyon_header");
2743 static void build_EDB(struct fc_info *fi, char *data, u_short flags, u_short len)
2745 fi->g.edb.buf_addr = ntohl((u_int)virt_to_bus(data));
2746 fi->g.edb.ehf = ntohs(flags);
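/* Round the buffer length up to a 4-byte boundary before handing it to Tachyon. */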
2747 if (len % 4)
2748 len += (4 - (len % 4));
2749 fi->g.edb.buf_len = ntohs(len);
2752 static void build_ODB(struct fc_info *fi, u_char seq_id, u_int d_id, u_int len, u_int cntl, u_short mtu, u_short ox_id, u_short rx_id, int NW_header, int int_required, u_int frame_class)
2754 fi->g.odb.seq_d_id = htonl(seq_id << 24 | d_id);
2755 fi->g.odb.tot_len = len;
2756 if (NW_header)
2757 fi->g.odb.tot_len += NW_HEADER_LEN;
2758 if (fi->g.odb.tot_len % 4)
2759 fi->g.odb.tot_len += (4 - (fi->g.odb.tot_len % 4));
2760 fi->g.odb.tot_len = htonl(fi->g.odb.tot_len);
2761 switch(int_required) {
2762 case NO_COMP_AND_INT:
2763 fi->g.odb.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | ODB_NO_INT | ODB_NO_COMP | cntl);
2764 break;
2765 case INT_AND_COMP_REQ:
2766 fi->g.odb.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | cntl);
2767 break;
2768 case NO_INT_COMP_REQ:
2769 fi->g.odb.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | ODB_NO_INT | cntl);
2770 break;
2772 fi->g.odb.rx_id = htons(rx_id);
2773 fi->g.odb.cs_enable = 0;
2774 fi->g.odb.cs_seed = htons(1);
2776 fi->g.odb.hdr_addr = htonl(virt_to_bus(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx]));
2777 fi->g.odb.frame_len = htons(mtu);
2779 if (NW_header) {
2780 /* The pointer to the sk_buff is in here. Freed up when the
2781 * OCI_interrupt is received. */
2783 fi->g.odb.trans_id = htonl(frame_class);
2784 fi->g.odb.hdr_len = TACHYON_HEADER_LEN + NW_HEADER_LEN;
2786 else {
2787 /* helps in tracking transmitted OX_IDs */
2788 fi->g.odb.trans_id = htonl((frame_class & 0xFFFF0000) | ox_id);
2789 fi->g.odb.hdr_len = TACHYON_HEADER_LEN;
2791 fi->g.odb.hdr_len = htons(fi->g.odb.hdr_len);
2793 fi->g.odb.edb_addr = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
2796 static void fill_login_frame(struct fc_info *fi, u_int logi)
2798 int i;
2799 fi->g.login.ls_cmnd_code= htonl(logi);
2800 fi->g.login.fc_ph_version = htons(PH_VERSION);
2801 if (fi->g.loop_up)
2802 fi->g.login.buff_to_buff_credit = htons(LOOP_BB_CREDIT);
2803 else
2804 if (fi->g.ptp_up)
2805 fi->g.login.buff_to_buff_credit = htons(PT2PT_BB_CREDIT);
2806 if ((logi != ELS_FLOGI) || (logi == ELS_ACC))
2807 fi->g.login.common_features = htons(PLOGI_C_F);
2808 else
2809 if (logi == ELS_FLOGI)
2810 fi->g.login.common_features = htons(FLOGI_C_F);
2811 fi->g.login.recv_data_field_size = htons(FRAME_SIZE);
2812 fi->g.login.n_port_total_conc_seq = htons(CONCURRENT_SEQUENCES);
2813 fi->g.login.rel_off_by_info_cat = htons(RO_INFO_CATEGORY);
2814 fi->g.login.ED_TOV = htonl(E_D_TOV);
2815 fi->g.login.n_port_name_high = htonl(N_PORT_NAME_HIGH);
2816 fi->g.login.n_port_name_low = htonl(N_PORT_NAME_LOW);
2817 fi->g.login.node_name_high = htonl(NODE_NAME_HIGH);
2818 fi->g.login.node_name_low = htonl(NODE_NAME_LOW);
2820 /* Fill Class 1 parameters */
2821 fi->g.login.c_of_s[0].service_options = htons(0);
2822 fi->g.login.c_of_s[0].initiator_ctl = htons(0);
2823 fi->g.login.c_of_s[0].recipient_ctl = htons(0);
2824 fi->g.login.c_of_s[0].recv_data_field_size = htons(0);
2825 fi->g.login.c_of_s[0].concurrent_sequences = htons(0);
2826 fi->g.login.c_of_s[0].n_port_end_to_end_credit = htons(0);
2827 fi->g.login.c_of_s[0].open_seq_per_exchange = htons(0);
2828 fi->g.login.c_of_s[0].resv = htons(0);
2830 /* Fill Class 2 parameters */
2831 fi->g.login.c_of_s[1].service_options = htons(0);
2832 fi->g.login.c_of_s[1].initiator_ctl = htons(0);
2833 fi->g.login.c_of_s[1].recipient_ctl = htons(0);
2834 fi->g.login.c_of_s[1].recv_data_field_size = htons(0);
2835 fi->g.login.c_of_s[1].concurrent_sequences = htons(0);
2836 fi->g.login.c_of_s[1].n_port_end_to_end_credit = htons(0);
2837 fi->g.login.c_of_s[1].open_seq_per_exchange = htons(0);
2838 fi->g.login.c_of_s[1].resv = htons(0);
2840 /* Fill Class 3 parameters */
2841 if (logi == ELS_FLOGI)
2842 fi->g.login.c_of_s[2].service_options = htons(SERVICE_VALID | SEQUENCE_DELIVERY);
2843 else
2844 fi->g.login.c_of_s[2].service_options = htons(SERVICE_VALID);
2845 fi->g.login.c_of_s[2].initiator_ctl = htons(0);
2846 fi->g.login.c_of_s[2].recipient_ctl = htons(0);
2847 fi->g.login.c_of_s[2].recv_data_field_size = htons(FRAME_SIZE);
2848 fi->g.login.c_of_s[2].concurrent_sequences = htons(CLASS3_CONCURRENT_SEQUENCE);
2849 fi->g.login.c_of_s[2].n_port_end_to_end_credit = htons(0);
2850 fi->g.login.c_of_s[2].open_seq_per_exchange = htons(CLASS3_OPEN_SEQUENCE);
2851 fi->g.login.c_of_s[2].resv = htons(0);
2853 for(i = 0; i < 4; i++) {
2854 fi->g.login.resv[i] = 0;
2855 fi->g.login.vendor_version_level[i] = 0;
2860 /* Clear the Interrupt Latch on the (i)chip, so that we can receive
2861 * Interrupts from Tachyon in the future. */
2863 static void reset_latch(struct fc_info *fi)
2865 writel(readl(fi->i_r.ptr_ichip_hw_status_reg) | ICHIP_HSR_INT_LATCH, fi->i_r.ptr_ichip_hw_status_reg);
2868 static void update_OCQ_indx(struct fc_info *fi)
2870 fi->q.ocq_prod_indx++;
2871 if (fi->q.ocq_prod_indx == OCQ_LENGTH)
2872 fi->q.ocq_prod_indx = 0;
2873 writel(fi->q.ocq_prod_indx, fi->t_r.ptr_ocq_prod_indx_reg);
2876 static void update_IMQ_indx(struct fc_info *fi, int count)
2878 fi->q.imq_cons_indx += count;
2879 if (fi->q.imq_cons_indx >= IMQ_LENGTH)
2880 fi->q.imq_cons_indx -= IMQ_LENGTH;
2881 writel(fi->q.imq_cons_indx, fi->t_r.ptr_imq_cons_indx_reg);
2884 static void update_SFSBQ_indx(struct fc_info *fi)
2886 fi->q.sfsbq_prod_indx++;
2887 if (fi->q.sfsbq_prod_indx == SFSBQ_LENGTH)
2888 fi->q.sfsbq_prod_indx = 0;
2889 writel(fi->q.sfsbq_prod_indx, fi->t_r.ptr_sfsbq_prod_reg);
2892 static void update_MFSBQ_indx(struct fc_info *fi, int count)
2894 fi->q.mfsbq_prod_indx += count;
2895 if (fi->q.mfsbq_prod_indx >= MFSBQ_LENGTH)
2896 fi->q.mfsbq_prod_indx -= MFSBQ_LENGTH;
2897 writel(fi->q.mfsbq_prod_indx, fi->t_r.ptr_mfsbq_prod_reg);
2901 static void update_tachyon_header_indx(struct fc_info *fi)
2903 fi->q.tachyon_header_indx++;
2904 if (fi->q.tachyon_header_indx == NO_OF_TACH_HEADERS)
2905 fi->q.tachyon_header_indx = 0;
2908 static void update_EDB_indx(struct fc_info *fi)
2910 fi->q.edb_buffer_indx++;
2911 if (fi->q.edb_buffer_indx == EDB_LEN)
2912 fi->q.edb_buffer_indx = 0;
2915 static int iph5526_open(struct net_device *dev)
2917 dev->tbusy = 0;
2918 dev->interrupt = 0;
2919 dev->start = 1;
2920 MOD_INC_USE_COUNT;
2921 return 0;
2924 static int iph5526_close(struct net_device *dev)
2926 dev->tbusy = 1;
2927 dev->start = 0;
2928 MOD_DEC_USE_COUNT;
2929 return 0;
2932 static int iph5526_send_packet(struct sk_buff *skb, struct net_device *dev)
2934 struct fc_info *fi = (struct fc_info*)dev->priv;
2935 int status = 0;
2936 short type = 0;
2937 u_long flags;
2938 ENTER("iph5526_send_packet");
2939 if (dev->tbusy) {
2940 printk(KERN_WARNING "%s: DEVICE BUSY\n", dev->name);
2941 dev->tbusy = 0;
2942 fi->fc_stats.rx_dropped++;
2943 dev->trans_start = jiffies;
2944 return 0;
2946 if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
2947 printk(KERN_WARNING "%s: Transmitter access conflict.\n",
2948 dev->name);
2949 fi->fc_stats.rx_dropped++;
2950 return 1;
2952 else {
2953 struct fcllc *fcllc;
2954 /* Strip off the pseudo header. */
2956 skb->data = skb->data + 2*FC_ALEN;
2957 skb->len = skb->len - 2*FC_ALEN;
2958 fcllc = (struct fcllc *)skb->data;
2959 type = ntohs(fcllc->ethertype);
2961 spin_lock_irqsave(&fi->fc_lock, flags);
2962 switch(type) {
2963 case ETH_P_IP:
2964 status = tx_ip_packet(skb, skb->len, fi);
2965 break;
2966 case ETH_P_ARP:
2967 status = tx_arp_packet(skb->data, skb->len, fi);
2968 break;
2969 default:
2970 T_MSG("WARNING!!! Received Unknown Packet Type... Discarding...");
2971 fi->fc_stats.rx_dropped++;
2972 break;
2974 spin_unlock_irqrestore(&fi->fc_lock, flags);
2977 if (status) {
2978 fi->fc_stats.tx_bytes += skb->len;
2979 fi->fc_stats.tx_packets++;
2981 else
2982 fi->fc_stats.rx_dropped++;
2983 dev->trans_start = jiffies;
2984 dev->tbusy = 0;
2985 /* We free up the IP buffers in the OCI_interrupt handler.
2986 * status == 0 implies that the frame was not transmitted. So the
2987 * skb is freed here. */
2989 if ((type == ETH_P_ARP) || (status == 0))
2990 dev_kfree_skb(skb);
2991 mark_bh(NET_BH);
2992 LEAVE("iph5526_send_packet");
2993 return 0;
2996 static int iph5526_change_mtu(struct net_device *dev, int mtu)
2998 return 0;
3001 static int tx_ip_packet(struct sk_buff *skb, unsigned long len, struct fc_info *fi)
3003 u_int d_id;
3004 int int_required = 1;
3005 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_DATA;
3006 u_int type = TYPE_LLC_SNAP;
3007 u_short ox_id = OX_ID_FIRST_SEQUENCE;
3008 u_int mtu;
3009 struct fc_node_info *q;
3011 ENTER("tx_ip_packet");
3012 q = look_up_cache(fi, skb->data - 2*FC_ALEN);
3013 if (q != NULL) {
3014 d_id = q->d_id;
3015 DPRINTK("Look-Up Cache Succeeded for d_id = %x", d_id);
3016 mtu = q->mtu;
3017 if (q->login == LOGIN_COMPLETED){
3018 fi->g.type_of_frame = FC_IP;
3019 return tx_exchange(fi, skb->data, len, r_ctl, type, d_id, mtu, int_required, ox_id, virt_to_bus(skb));
3022 if (q->d_id == BROADCAST) {
3023 struct fc_node_info *p = fi->node_info_list;
3024 int return_value = FALSE;
3025 fi->g.type_of_frame = FC_BROADCAST;
3026 /* Do unicast to local nodes. */
3028 int_required = 0;
3029 while(p != NULL) {
3030 d_id = p->d_id;
3031 if ((d_id & 0xFFFF00) == fi->g.my_ddaa)
3032 return_value |= tx_exchange(fi, skb->data, len, r_ctl, type, d_id, fi->g.my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3033 p = p->next;
3035 kfree(q);
3036 return return_value;
3039 if (q->login != LOGIN_COMPLETED) {
3040 DPRINTK1("Node not logged in... Txing PLOGI to %x", d_id);
3041 /* FIXME: we are dumping the frame here */
3042 tx_logi(fi, ELS_PLOGI, d_id);
3045 DPRINTK2("Look-Up Cache Failed");
3046 LEAVE("tx_ip_packet");
3047 return 0;
3050 static int tx_arp_packet(char *data, unsigned long len, struct fc_info *fi)
3052 u_int opcode = data[ARP_OPCODE_0];
3053 u_int d_id;
3054 int int_required = 0, return_value = FALSE;
3055 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_DATA;
3056 u_int type = TYPE_LLC_SNAP;
3057 u_short ox_id = OX_ID_FIRST_SEQUENCE;
3058 u_int my_mtu = fi->g.my_mtu;
3059 ENTER("tx_arp_packet");
3061 opcode = opcode << 8 | data[ARP_OPCODE_1];
3062 fi->g.type_of_frame = FC_IP;
3064 if (opcode == ARPOP_REQUEST) {
3065 struct fc_node_info *q = fi->node_info_list;
3066 d_id = BROADCAST;
3067 return_value |= tx_exchange(fi, data, len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3068 /* Some devices support HW_TYPE 0x01 */
3069 memcpy(fi->g.arp_buffer, data - 2*FC_ALEN, len + 2*FC_ALEN);
3070 fi->g.arp_buffer[9 + 2*FC_ALEN] = 0x01;
3071 return_value |= tx_exchange(fi, (char *)(fi->g.arp_buffer + 2*FC_ALEN), len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3073 /* Do unicast to local nodes. */
3075 while(q != NULL) {
3076 fi->g.type_of_frame = FC_BROADCAST;
3077 d_id = q->d_id;
3078 if ((d_id & 0xFFFF00) == fi->g.my_ddaa) {
3079 return_value |= tx_exchange(fi, data, len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3080 // Some devices support HW_TYPE 0x01
3081 memcpy(fi->g.arp_buffer, data - 2*FC_ALEN, len + 2*FC_ALEN);
3082 fi->g.arp_buffer[9 + 2*FC_ALEN] = 0x01;
3083 return_value |= tx_exchange(fi, (char *)(fi->g.arp_buffer + 2*FC_ALEN), len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3085 q = q->next;
3087 return return_value;
3089 else
3090 if (opcode == ARPOP_REPLY) {
3091 struct fc_node_info *q; u_int mtu;
3092 DPRINTK("We are sending out an ARP reply");
3093 q = look_up_cache(fi, data - 2*FC_ALEN);
3094 if (q != NULL) {
3095 d_id = q->d_id;
3096 DPRINTK("Look-Up Cache Succeeded for d_id = %x", d_id);
3097 mtu = q->mtu;
3098 if (q->login == LOGIN_COMPLETED){
3099 tx_exchange(fi, data, len, r_ctl, type, d_id, mtu, int_required, ox_id, TYPE_LLC_SNAP);
3100 /* Some devices support HW_TYPE 0x01 */
3101 memcpy(fi->g.arp_buffer, data - 2*FC_ALEN, len + 2*FC_ALEN);
3102 fi->g.arp_buffer[9 + 2*FC_ALEN] = 0x01;
3103 return tx_exchange(fi, (char *)(fi->g.arp_buffer + 2*FC_ALEN), len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3105 else {
3106 DPRINTK1("Node not logged in... Txing PLOGI to %x", d_id);
3107 tx_logi(fi, ELS_PLOGI, d_id); /* FIXME: we are dumping the frame here */
3110 DPRINTK2("Look-Up Cache Failed");
3112 else {
3113 T_MSG("Warning!!! Invalid Opcode in ARP Packet!");
3115 LEAVE("tx_arp_packet");
3116 return 0;
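/* Receive path for single-frame IP/ARP sequences: allocate an skb, strip
 * the Tachyon header, rebuild the FC hard header (destination and source
 * port addresses) in front of the LLC/SNAP payload and hand the packet to
 * the stack with netif_rx().
 */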
3120 static void rx_net_packet(struct fc_info *fi, u_char *buff_addr, int payload_size)
3122 struct net_device *dev = fi->dev;
3123 struct sk_buff *skb;
3124 u_int skb_size = 0;
3125 struct fch_hdr fch;
3126 ENTER("rx_net_packet");
3127 skb_size = payload_size - TACHYON_HEADER_LEN;
3128 DPRINTK("skb_size = %d", skb_size);
3129 fi->fc_stats.rx_bytes += skb_size - 2;
3130 skb = dev_alloc_skb(skb_size);
3131 if (skb == NULL) {
3132 printk(KERN_NOTICE "%s: In rx_net_packet() Memory squeeze, dropping packet.\n", dev->name);
3133 fi->fc_stats.rx_dropped++;
3134 return;
3136 /* Skip over the Tachyon Frame Header. */
3138 buff_addr += TACHYON_HEADER_LEN;
3140 memcpy(fch.daddr, buff_addr + 2, FC_ALEN);
3141 memcpy(fch.saddr, buff_addr + 10, FC_ALEN);
3142 buff_addr += 2;
3143 memcpy(buff_addr, fch.daddr, FC_ALEN);
3144 memcpy(buff_addr + 6, fch.saddr, FC_ALEN);
3145 skb_reserve(skb, 2);
3146 memcpy(skb_put(skb, skb_size - 2), buff_addr, skb_size - 2);
3147 skb->dev = dev;
3148 skb->protocol = fc_type_trans(skb, dev);
3149 DPRINTK("protocol = %x", skb->protocol);
3151 /* Hmmm... accept HW Type 0x01 as well. */
3153 if (skb->protocol == ntohs(ETH_P_ARP))
3154 skb->data[1] = 0x06;
3155 netif_rx(skb);
3156 fi->fc_stats.rx_packets++;
3157 LEAVE("rx_net_packet");
3161 static void rx_net_mfs_packet(struct fc_info *fi, struct sk_buff *skb)
3163 struct net_device *dev = fi->dev;
3164 struct fch_hdr fch;
3165 ENTER("rx_net_mfs_packet");
3166 /* Construct your Hard Header */
3167 memcpy(fch.daddr, skb->data + 2, FC_ALEN);
3168 memcpy(fch.saddr, skb->data + 10, FC_ALEN);
3169 skb_pull(skb, 2);
3170 memcpy(skb->data, fch.daddr, FC_ALEN);
3171 memcpy(skb->data + 6, fch.saddr, FC_ALEN);
3172 skb->dev = dev;
3173 skb->protocol = fc_type_trans(skb, dev);
3174 DPRINTK("protocol = %x", skb->protocol);
3175 netif_rx(skb);
3176 LEAVE("rx_net_mfs_packet");
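/* Fibre Channel analogue of eth_type_trans(): classify the frame as
 * broadcast/multicast/other-host from the destination address, strip the
 * FC hard header, and for IP and ARP strip the LLC/SNAP header as well,
 * returning the protocol ID for the network stack.
 */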
3179 unsigned short fc_type_trans(struct sk_buff *skb, struct net_device *dev)
3181 struct fch_hdr *fch=(struct fch_hdr *)skb->data;
3182 struct fcllc *fcllc;
3183 skb->mac.raw = skb->data;
3184 fcllc = (struct fcllc *)(skb->data + sizeof(struct fch_hdr) + 2);
3185 skb_pull(skb,sizeof(struct fch_hdr) + 2);
3187 if(*fch->daddr & 1) {
3188 if(!memcmp(fch->daddr,dev->broadcast,FC_ALEN))
3189 skb->pkt_type = PACKET_BROADCAST;
3190 else
3191 skb->pkt_type = PACKET_MULTICAST;
3193 else if(dev->flags & IFF_PROMISC) {
3194 if(memcmp(fch->daddr, dev->dev_addr, FC_ALEN))
3195 skb->pkt_type=PACKET_OTHERHOST;
3198 /* Strip the SNAP header from ARP packets since we don't
3199 * pass them through to the 802.2/SNAP layers. */
3202 if (fcllc->dsap == EXTENDED_SAP &&
3203 (fcllc->ethertype == ntohs(ETH_P_IP) ||
3204 fcllc->ethertype == ntohs(ETH_P_ARP))) {
3205 skb_pull(skb, sizeof(struct fcllc));
3206 return fcllc->ethertype;
3208 return ntohs(ETH_P_802_2);
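/* Common transmit routine for a single-sequence exchange: choose the
 * OX_ID/RX_ID from the frame class, build the Tachyon header (plus the
 * FC network header for LLC/SNAP frames), reserve a free header and EDB
 * slot and push the sequence out through tx_sequence().
 */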
3211 static int tx_exchange(struct fc_info *fi, char *data, u_int len, u_int r_ctl, u_int type, u_int d_id, u_int mtu, int int_required, u_short tx_ox_id, u_int frame_class)
3213 u_char df_ctl;
3214 int NW_flag = 0, h_size, return_value;
3215 u_short rx_id = RX_ID_FIRST_SEQUENCE;
3216 u_int tachyon_status;
3217 u_int my_id = fi->g.my_id;
3218 ENTER("tx_exchange");
3220 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
3221 DPRINTK("Tachyon Status = %x len = %d MTU = %d", tachyon_status, len, mtu);
3222 if (tachyon_status & OSM_FROZEN) {
3223 reset_tachyon(fi, ERROR_RELEASE);
3224 reset_tachyon(fi, OCQ_RESET);
3225 DPRINTK("Tachyon Status = %x len = %d MTU = %d", tachyon_status, len, mtu);
3227 if (tx_ox_id == OX_ID_FIRST_SEQUENCE) {
3228 switch(fi->g.type_of_frame) {
3229 case FC_SCSI_READ:
3230 tx_ox_id = fi->g.scsi_oxid | SCSI_READ_BIT;
3231 break;
3232 case FC_SCSI_WRITE:
3233 tx_ox_id = fi->g.scsi_oxid;
3234 break;
3235 default:
3236 tx_ox_id = fi->g.ox_id;
3237 break;
3240 else {
3241 switch(fi->g.type_of_frame) {
3242 case FC_SCSI_READ:
3243 rx_id = fi->g.scsi_oxid | SCSI_READ_BIT;
3244 break;
3245 case FC_SCSI_WRITE:
3246 rx_id = fi->g.scsi_oxid;
3247 break;
3248 case FC_BLS:
3249 rx_id = RX_ID_FIRST_SEQUENCE;
3250 break;
3251 default:
3252 rx_id = fi->g.ox_id;
3253 break;
3257 if (type == TYPE_LLC_SNAP) {
3258 df_ctl = 0x20;
3259 NW_flag = 1;
3260 /* Multi Frame Sequence ? If yes, set RO bit */
3261 if (len > mtu)
3262 type |= RELATIVE_OFF_PRESENT;
3263 build_tachyon_header(fi, my_id, r_ctl, d_id, type, fi->g.seq_id, df_ctl, tx_ox_id, rx_id, data - 2*FC_ALEN);
3265 else {
3266 df_ctl = 0;
3267 /* Multi Frame Sequence ? If yes, set RO bit */
3268 if (len > mtu)
3269 type |= RELATIVE_OFF_PRESENT;
3270 build_tachyon_header(fi, my_id, r_ctl, d_id, type, fi->g.seq_id, df_ctl, tx_ox_id, rx_id, NULL);
3273 /* Get free Tachyon Headers and EDBs */
3274 if (get_free_header(fi) || get_free_EDB(fi))
3275 return 0;
3277 if ((type & 0xFF000000) == TYPE_LLC_SNAP) {
3278 h_size = TACHYON_HEADER_LEN + NW_HEADER_LEN;
3279 memcpy(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx], &(fi->g.tach_header), h_size);
3281 else
3282 memcpy(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx], &(fi->g.tach_header), TACHYON_HEADER_LEN);
3284 return_value = tx_sequence(fi, data, len, mtu, d_id, tx_ox_id, rx_id, fi->g.seq_id, NW_flag, int_required, frame_class);
3286 switch(fi->g.type_of_frame) {
3287 case FC_SCSI_READ:
3288 case FC_SCSI_WRITE:
3289 update_scsi_oxid(fi);
3290 break;
3291 case FC_BLS:
3292 break;
3293 default:
3294 fi->g.ox_id++;
3295 if (fi->g.ox_id == 0xFFFF)
3296 fi->g.ox_id = NOT_SCSI_XID;
3297 break;
3300 if (fi->g.seq_id == MAX_SEQ_ID)
3301 fi->g.seq_id = 0;
3302 else
3303 fi->g.seq_id++;
3304 LEAVE("tx_exchange");
3305 return return_value;
3308 static int tx_sequence(struct fc_info *fi, char *data, u_int len, u_int mtu, u_int d_id, u_short ox_id, u_short rx_id, u_char seq_id, int NW_flag, int int_required, u_int frame_class)
3310 u_int cntl = 0;
3311 int return_value;
3312 ENTER("tx_sequence");
3313 build_EDB(fi, data, EDB_END, len);
3314 memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
3315 build_ODB(fi, seq_id, d_id, len, cntl, mtu, ox_id, rx_id, NW_flag, int_required, frame_class);
3316 memcpy(fi->q.ptr_odb[fi->q.ocq_prod_indx], &(fi->g.odb), sizeof(ODB));
3317 if (fi->g.link_up != TRUE) {
3318 DPRINTK2("Fibre Channel Link not up. Dropping Exchange!");
3319 return_value = FALSE;
3321 else {
3322 /* To be on the safe side, a check should be included
3323 * at this point to check if we are overrunning
3324 * Tachyon.
3326 update_OCQ_indx(fi);
3327 return_value = TRUE;
3329 update_EDB_indx(fi);
3330 update_tachyon_header_indx(fi);
3331 LEAVE("tx_sequence");
3332 return return_value;
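/* get_free_header()/get_free_EDB() scan the circular header and EDB pools
 * for a free slot. A header slot is treated as busy while the OX_ID stored
 * in it is still a SCSI OX_ID (<= MAX_SCSI_XID), since that exchange may be
 * outstanding. If a full scan finds nothing free, something is badly wrong
 * (at most 32 SCSI commands can be queued) and Tachyon is software-reset.
 */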
3335 static int get_free_header(struct fc_info *fi)
3337 u_short temp_ox_id;
3338 u_int *tach_header, initial_indx = fi->q.tachyon_header_indx;
3339 /* Check if the header is in use.
3340 * We could have an outstanding command.
3341 * We should find a free slot as we can queue a
3342 * maximum of 32 SCSI commands only.
3344 tach_header = fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx];
3345 temp_ox_id = ntohl(*(tach_header + 6)) >> 16;
3346 /* We care about the SCSI writes only. Those are the wicked ones
3347 * that need an additional set of buffers.
3349 while(temp_ox_id <= MAX_SCSI_XID) {
3350 update_tachyon_header_indx(fi);
3351 if (fi->q.tachyon_header_indx == initial_indx) {
3352 /* Should never happen.
3354 T_MSG("No free Tachyon headers available");
3355 reset_tachyon(fi, SOFTWARE_RESET);
3356 return 1;
3358 tach_header = fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx];
3359 temp_ox_id = ntohl(*(tach_header + 6)) >> 16;
3361 return 0;
3364 static int get_free_EDB(struct fc_info *fi)
3366 unsigned int initial_indx = fi->q.edb_buffer_indx;
3367 /* Check if the EDB is in use.
3368 * We could have an outstanding SCSI Write command.
3369 * We should find a free slot as we can queue a
3370 * maximum of 32 SCSI commands only.
3372 while (fi->q.free_edb_list[fi->q.edb_buffer_indx] != EDB_FREE) {
3373 update_EDB_indx(fi);
3374 if (fi->q.edb_buffer_indx == initial_indx) {
3375 T_MSG("No free EDB buffers available")
3376 reset_tachyon(fi, SOFTWARE_RESET);
3377 return 1;
3380 return 0;
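/* validate_login() re-checks a login payload against the address cache:
 * the Port_Name (payload word 10) must already be known, and both the S_ID
 * (word 3) and the Node_Name (word 12) must still match the cached entry.
 * On a match the node is marked LOGIN_COMPLETED and 1 is returned; a
 * mismatch returns 0 so the caller can reject the login with a LOGO.
 */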
3383 static int validate_login(struct fc_info *fi, u_int *base_ptr)
3385 struct fc_node_info *q = fi->node_info_list;
3386 char n_port_name[PORT_NAME_LEN];
3387 char node_name[NODE_NAME_LEN];
3388 u_int s_id;
3389 ENTER("validate_login");
3390 /* index to Port Name in the payload. We need the 8-byte Port Name */
3391 memcpy(n_port_name, base_ptr + 10, PORT_NAME_LEN);
3392 memcpy(node_name, base_ptr + 12, NODE_NAME_LEN);
3393 s_id = ntohl(*(base_ptr + 3)) & 0x00FFFFFF;
3395 /* check if Fibre Channel IDs have changed */
3396 while(q != NULL) {
3397 if (memcmp(n_port_name, q->hw_addr, PORT_NAME_LEN) == 0) {
3398 if ((s_id != q->d_id) || (memcmp(node_name, q->node_name, NODE_NAME_LEN) != 0)) {
3399 DPRINTK1("Fibre Channel ID of Node has changed. Txing LOGO.");
3400 return 0;
3402 q->login = LOGIN_COMPLETED;
3403 #if DEBUG_5526_2
3404 display_cache(fi);
3405 #endif
3406 return 1;
3408 q = q->next;
3410 DPRINTK1("Port Name does not match. Txing LOGO.");
3411 return 0;
3412 LEAVE("validate_login");
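/* add_to_address_cache() parses a received login payload (S_ID from word 3,
 * receive data field size from word 10, Port_Name at word 13, Node_Name at
 * word 15, class-of-service parameters from word 17 on) and either refreshes
 * the matching fc_node_info entry or appends a new one, allocated with
 * GFP_ATOMIC. target_id starts out as 0xFF, i.e. not yet a SCSI target.
 */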
3415 static void add_to_address_cache(struct fc_info *fi, u_int *base_ptr)
3417 int size = sizeof(struct fc_node_info);
3418 struct fc_node_info *p, *q = fi->node_info_list, *r = NULL;
3419 char n_port_name[PORT_NAME_LEN];
3420 u_int s_id;
3421 ENTER("add_to_address_cache");
3422 /* index to Port Name in the payload. We need the 8-byte Port Name */
3423 memcpy(n_port_name, base_ptr + 13, PORT_NAME_LEN);
3424 s_id = ntohl(*(base_ptr + 3)) & 0x00FFFFFF;
3426 /* check if info already exists */
3427 while(q != NULL) {
3428 if (memcmp(n_port_name, q->hw_addr, PORT_NAME_LEN) == 0) {
3429 if (s_id != q->d_id) {
3430 memcpy(&(q->c_of_s[0]), base_ptr + 17, 3 * sizeof(CLASS_OF_SERVICE));
3431 q->mtu = ntohl(*(base_ptr + 10)) & 0x00000FFF;
3432 q->d_id = s_id;
3433 memcpy(q->node_name, base_ptr + 15, NODE_NAME_LEN);
3435 q->login = LOGIN_COMPLETED;
3436 q->scsi = FALSE;
3437 fi->num_nodes++;
3438 #if DEBUG_5526_2
3439 display_cache(fi);
3440 #endif
3441 return;
3443 r = q;
3444 q = q->next;
3446 p = (struct fc_node_info *)kmalloc(size, GFP_ATOMIC);
3447 if (p == NULL) {
3448 T_MSG("kmalloc failed in add_to_address_cache()");
3449 return;
3451 memcpy(&(p->c_of_s[0]), base_ptr + 17, 3 * sizeof(CLASS_OF_SERVICE));
3452 p->mtu = ntohl(*(base_ptr + 10)) & 0x00000FFF;
3453 p->d_id = s_id;
3454 memcpy(p->hw_addr, base_ptr + 13, PORT_NAME_LEN);
3455 memcpy(p->node_name, base_ptr + 15, NODE_NAME_LEN);
3456 p->login = LOGIN_COMPLETED;
3457 p->scsi = FALSE;
3458 p->target_id = 0xFF;
3459 p->next = NULL;
3460 if (fi->node_info_list == NULL)
3461 fi->node_info_list = p;
3462 else
3463 r->next = p;
3464 fi->num_nodes++;
3465 #if DEBUG_5526_2
3466 display_cache(fi);
3467 #endif
3468 LEAVE("add_to_address_cache");
3469 return;
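/* remove_from_address_cache() handles the ELS frames that invalidate cache
 * state. ELS_LOGO drops the node back to LOGIN_ATTEMPTED. ELS_RSCN walks the
 * affected-address pages (one 32-bit page each: format byte plus a 24-bit
 * address) and, depending on the format, re-handles a single port or every
 * cached node in the affected area/domain; a short timer then re-queries the
 * name server (scr_timer) so newly appeared ports are discovered without
 * duplicating requests already in flight.
 */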
3472 static void remove_from_address_cache(struct fc_info *fi, u_int *base_ptr, u_int cmnd_code)
3474 struct fc_node_info *q = fi->node_info_list;
3475 u_int s_id;
3476 ENTER("remove_from_address_cache");
3477 s_id = ntohl(*(base_ptr + 3)) & 0x00FFFFFF;
3478 switch(cmnd_code) {
3479 case ELS_LOGO:
3480 /* check if info exists */
3481 while (q != NULL) {
3482 if (s_id == q->d_id) {
3483 if (q->login == LOGIN_COMPLETED)
3484 q->login = LOGIN_ATTEMPTED;
3485 if (fi->num_nodes > 0)
3486 fi->num_nodes--;
3487 #if DEBUG_5526_2
3488 display_cache(fi);
3489 #endif
3490 return;
3492 q = q->next;
3494 DPRINTK1("ELS_LOGO received from node 0x%x which is not logged-in", s_id);
3495 break;
3496 case ELS_RSCN:
3498 int payload_len = ntohl(*(base_ptr + 8)) & 0xFF;
3499 int no_of_pages, i;
3500 u_char address_format;
3501 u_short received_ox_id = ntohl(*(base_ptr + 6)) >> 16;
3502 u_int node_id, mask, *page_ptr = base_ptr + 9;
3503 if ((payload_len < 4) || (payload_len > 256)) {
3504 DPRINTK1("RSCN with invalid payload length received");
3505 tx_ls_rjt(fi, s_id, received_ox_id, LOGICAL_ERR, RECV_FIELD_SIZE);
3506 return;
3508 /* Page_size includes the Command Code */
3509 no_of_pages = (payload_len / 4) - 1;
3510 for (i = 0; i < no_of_pages; i++) {
3511 address_format = ntohl(*page_ptr) >> 24;
3512 node_id = ntohl(*page_ptr) & 0x00FFFFFF;
3513 switch(address_format) {
3514 case PORT_ADDRESS_FORMAT:
3515 rscn_handler(fi, node_id);
3516 break;
3517 case AREA_ADDRESS_FORMAT:
3518 case DOMAIN_ADDRESS_FORMAT:
3519 if (address_format == AREA_ADDRESS_FORMAT)
3520 mask = 0xFFFF00;
3521 else
3522 mask = 0xFF0000;
3523 while(q != NULL) {
3524 if ((q->d_id & mask) == (node_id & mask))
3525 rscn_handler(fi, q->d_id);
3526 q = q->next;
3528 /* There might be some new nodes to be
3529 * discovered. But, some of the earlier
3530 * requests as a result of the RSCN might be
3531 * in progress. We don't want to duplicate that
3532 * effort. So let's call SCR after a lag.
3534 fi->explore_timer.function = scr_timer;
3535 fi->explore_timer.data = (unsigned long)fi;
3536 fi->explore_timer.expires = RUN_AT((no_of_pages*3*HZ)/100);
3537 init_timer(&fi->explore_timer);
3538 add_timer(&fi->explore_timer);
3539 break;
3540 default:
3541 T_MSG("RSCN with invalid address format received");
3542 tx_ls_rjt(fi, s_id, received_ox_id, LOGICAL_ERR, NO_EXPLN);
3544 page_ptr += 1;
3545 } /* end of for loop */
3546 } /* end of case RSCN: */
3547 break;
3549 #if DEBUG_5526_2
3550 display_cache(fi);
3551 #endif
3552 LEAVE("remove_from_address_cache");
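/* rscn_handler(): a node named in an RSCN is pushed back to LOGIN_ATTEMPTED
 * if it was logged in, probed with an ADISC if it had logged out, and
 * otherwise a fresh PLOGI is attempted. scr_timer() simply fires the
 * deferred name-server query (GP_ID4) scheduled above.
 */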
3555 static void rscn_handler(struct fc_info *fi, u_int node_id)
3557 struct fc_node_info *q = fi->node_info_list;
3558 int login_state = sid_logged_in(fi, node_id);
3559 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN)) {
3560 while(q != NULL) {
3561 if (q->d_id == node_id) {
3562 q->login = LOGIN_ATTEMPTED;
3563 if (fi->num_nodes > 0)
3564 fi->num_nodes--;
3565 break;
3567 else
3568 q = q->next;
3571 else
3572 if (login_state == NODE_LOGGED_OUT)
3573 tx_adisc(fi, ELS_ADISC, node_id, OX_ID_FIRST_SEQUENCE);
3574 else
3575 if (login_state == NODE_NOT_PRESENT)
3576 tx_logi(fi, ELS_PLOGI, node_id);
3579 static void scr_timer(unsigned long data)
3581 struct fc_info *fi = (struct fc_info *)data;
3582 del_timer(&fi->explore_timer);
3583 tx_name_server_req(fi, FCS_GP_ID4);
3586 static int sid_logged_in(struct fc_info *fi, u_int s_id)
3588 struct fc_node_info *temp = fi->node_info_list;
3589 while(temp != NULL)
3590 if ((temp->d_id == s_id) && (temp->login == LOGIN_COMPLETED)) {
3591 if (temp->scsi != FALSE)
3592 return NODE_PROCESS_LOGGED_IN;
3593 else
3594 return NODE_LOGGED_IN;
3596 else
3597 if ((temp->d_id == s_id) && (temp->login != LOGIN_COMPLETED))
3598 return NODE_LOGGED_OUT;
3599 else
3600 temp = temp->next;
3601 return NODE_NOT_PRESENT;
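/* mark_scsi_sid() looks at the FCP service parameter bits (payload word 12)
 * of what is presumably the PRLI payload for the node identified by the S_ID
 * in word 3. DELETE_ENTRY clears the node's SCSI role; otherwise the node is
 * flagged as INITIATOR or TARGET, and a new target gets the next free
 * target_id so its SCSI target number stays stable even if its Fibre Channel
 * address changes later.
 */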
3604 static void mark_scsi_sid(struct fc_info *fi, u_int *buff_addr, u_char action)
3606 struct fc_node_info *temp = fi->node_info_list;
3607 u_int s_id;
3608 u_int service_params;
3609 s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
3610 service_params = ntohl(*(buff_addr + 12)) & 0x000000F0;
3611 while(temp != NULL)
3612 if ((temp->d_id == s_id) && (temp->login == LOGIN_COMPLETED)) {
3613 if (action == DELETE_ENTRY) {
3614 temp->scsi = FALSE;
3615 #if DEBUG_5526_2
3616 display_cache(fi);
3617 #endif
3618 return;
3620 /* Check if it is a SCSI Target */
3621 if (!(service_params & TARGET_FUNC)) {
3622 temp->scsi = INITIATOR;
3623 #if DEBUG_5526_2
3624 display_cache(fi);
3625 #endif
3626 return;
3628 temp->scsi = TARGET;
3629 /* This helps to maintain the target_id no matter what your
3630 * Fibre Channel ID is.
3632 if (temp->target_id == 0xFF) {
3633 if (fi->g.no_of_targets <= MAX_SCSI_TARGETS)
3634 temp->target_id = fi->g.no_of_targets++;
3635 else
3636 T_MSG("MAX TARGETS reached!");
3638 else
3639 DPRINTK1("Target_id %d already present", temp->target_id);
3640 #if DEBUG_5526_2
3641 display_cache(fi);
3642 #endif
3643 return;
3645 else
3646 temp = temp->next;
3647 return;
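/* node_logged_in_prev() checks an ADISC-style payload (Node_Name at word 12,
 * Port_Name at word 10) against the cache: the low 6 bytes of the Port_Name
 * double as the pseudo MAC address, so look_up_cache() matches them against
 * hw_addr[2..7]. An all-FF address is treated as IP broadcast and gets a
 * temporary kmalloc'ed entry with d_id = BROADCAST.
 */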
3650 static int node_logged_in_prev(struct fc_info *fi, u_int *buff_addr)
3652 struct fc_node_info *temp;
3653 u_char *data = (u_char *)buff_addr;
3654 u_int s_id;
3655 char node_name[NODE_NAME_LEN];
3656 s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
3657 memcpy(node_name, buff_addr + 12, NODE_NAME_LEN);
3658 /* point to port_name in the ADISC payload */
3659 data += 10 * 4;
3660 /* point to last 6 bytes of port_name */
3661 data += 2;
3662 temp = look_up_cache(fi, data);
3663 if (temp != NULL) {
3664 if ((temp->d_id == s_id) && (memcmp(node_name, temp->node_name, NODE_NAME_LEN) == 0)) {
3665 temp->login = LOGIN_COMPLETED;
3666 #if DEBUG_5526_2
3667 display_cache(fi);
3668 #endif
3669 return TRUE;
3672 return FALSE;
3675 static struct fc_node_info *look_up_cache(struct fc_info *fi, char *data)
3677 struct fc_node_info *temp_list = fi->node_info_list, *q;
3678 u_char n_port_name[FC_ALEN], temp_addr[FC_ALEN];
3679 ENTER("look_up_cache");
3680 memcpy(n_port_name, data, FC_ALEN);
3681 while(temp_list) {
3682 if (memcmp(n_port_name, &(temp_list->hw_addr[2]), FC_ALEN) == 0)
3683 return temp_list;
3684 else
3685 temp_list = temp_list->next;
3688 /* Broadcast IP ?
3690 temp_addr[0] = temp_addr[1] = temp_addr[2] = 0xFF;
3691 temp_addr[3] = temp_addr[4] = temp_addr[5] = 0xFF;
3692 if (memcmp(n_port_name, temp_addr, FC_ALEN) == 0) {
3693 q = (struct fc_node_info *)kmalloc(sizeof(struct fc_node_info), GFP_ATOMIC);
3694 if (q == NULL) {
3695 T_MSG("kmalloc failed in look_up_cache()");
3696 return NULL;
3698 q->d_id = BROADCAST;
3699 return q;
3701 LEAVE("look_up_cache");
3702 return NULL;
3705 static int display_cache(struct fc_info *fi)
3707 struct fc_node_info *q = fi->node_info_list;
3708 #if DEBUG_5526_2
3709 struct ox_id_els_map *temp_ox_id_list = fi->ox_id_list;
3710 #endif
3711 int count = 0, j;
3712 printk("\nFibre Channel Node Information for %s\n", fi->name);
3713 printk("My FC_ID = %x, My WWN = %x %x, ", fi->g.my_id, fi->g.my_node_name_high, fi->g.my_node_name_low);
3714 if (fi->g.ptp_up == TRUE)
3715 printk("Port_Type = N_Port\n");
3716 if (fi->g.loop_up == TRUE)
3717 printk("Port_Type = L_Port\n");
3718 while(q != NULL) {
3719 printk("WWN = ");
3720 for (j = 0; j < PORT_NAME_LEN; j++)
3721 printk("%x ", q->hw_addr[j]);
3722 printk("FC_ID = %x, ", q->d_id);
3723 printk("Login = ");
3724 if (q->login == LOGIN_COMPLETED)
3725 printk("ON ");
3726 else
3727 printk("OFF ");
3728 if (q->scsi == TARGET)
3729 printk("Target_ID = %d ", q->target_id);
3730 printk("\n");
3731 q = q->next;
3732 count++;
3735 #if DEBUG_5526_2
3736 printk("OX_ID -> ELS Map\n");
3737 while(temp_ox_id_list) {
3738 printk("ox_id = %x, ELS = %x\n", temp_ox_id_list->ox_id, temp_ox_id_list->els);
3739 temp_ox_id_list = temp_ox_id_list->next;
3741 #endif
3743 return 0;
3746 static struct net_device_stats * iph5526_get_stats(struct net_device *dev)
3748 struct fc_info *fi = (struct fc_info*)dev->priv;
3749 return (struct net_device_stats *) &fi->fc_stats;
3753 /* SCSI stuff starts here */
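/* iph5526_detect() probes every PCI ID in clone_list, registers a Scsi_Host
 * per card, ioremaps BAR 0 (the (i)chip registers, with the Tachyon register
 * window at TACHYON_OFFSET), requests a shared interrupt and initializes
 * Tachyon. It then busy-waits up to 10 seconds per card for the link to come
 * up and for port discovery / fabric exploration / ADISC to finish before
 * reporting the host to the SCSI layer.
 */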
3755 static struct proc_dir_entry proc_scsi_iph5526 = {
3756 PROC_SCSI_IPH5526_FC, 7, "iph5526", S_IFDIR, S_IRUGO | S_IXUGO, 2
3760 int iph5526_detect(Scsi_Host_Template *tmpt)
3762 struct Scsi_Host *host = NULL;
3763 struct iph5526_hostdata *hostdata;
3764 struct fc_info *fi = NULL;
3765 int no_of_hosts = 0, timeout, i, j, count = 0;
3766 u_int pci_maddr = 0;
3767 struct pci_dev *pdev = NULL;
3769 tmpt->proc_dir = &proc_scsi_iph5526;
3770 if (pci_present() == 0) {
3771 printk("iph5526: PCI not present\n");
3772 return 0;
3775 for (i = 0; i <= MAX_FC_CARDS; i++)
3776 fc[i] = NULL;
3778 for (i = 0; clone_list[i].vendor_id != 0; i++)
3779 while ((pdev = pci_find_device(clone_list[i].vendor_id, clone_list[i].device_id, pdev))) {
3780 unsigned short pci_command;
3781 if (count < MAX_FC_CARDS) {
3782 fc[count] = kmalloc(sizeof(struct fc_info), GFP_ATOMIC);
3783 if (fc[count] == NULL) {
3784 printk("iph5526.c: Unable to register card # %d\n", count + 1);
3785 return no_of_hosts;
3787 memset(fc[count], 0, sizeof(struct fc_info));
3789 else {
3790 printk("iph5526.c: Maximum Number of cards reached.\n");
3791 return no_of_hosts;
3794 fi = fc[count];
3795 sprintf(fi->name, "fc%d", count);
3797 host = scsi_register(tmpt, sizeof(struct iph5526_hostdata));
3798 hostdata = (struct iph5526_hostdata *)host->hostdata;
3799 memset(hostdata, 0 , sizeof(struct iph5526_hostdata));
3800 for (j = 0; j < MAX_SCSI_TARGETS; j++)
3801 hostdata->tag_ages[j] = jiffies;
3802 hostdata->fi = fi;
3803 fi->host = host;
3804 //host->max_id = MAX_SCSI_TARGETS;
3805 host->max_id = 5;
3806 host->hostt->use_new_eh_code = 1;
3807 host->this_id = tmpt->this_id;
3809 pci_maddr = pdev->resource[0].start;
3810 if ( (pdev->resource[0].flags & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
3811 printk("iph5526.c : Cannot find proper PCI device base address.\n");
3812 scsi_unregister(host);
3813 kfree(fc[count]);
3814 fc[count] = NULL;
3815 continue;
3818 DPRINTK("pci_maddr = %x", pci_maddr);
3819 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3821 pci_irq_line = pdev->irq;
3822 printk("iph5526.c: PCI BIOS reports %s at i/o %#x, irq %d.\n", clone_list[i].name, pci_maddr, pci_irq_line);
3823 fi->g.mem_base = ioremap(pci_maddr & PAGE_MASK, 1024);
3825 /* We use Memory Mapped IO. The initial space contains the
3826 * PCI Configuration registers followed by the (i) chip
3827 * registers followed by the Tachyon registers.
3829 /* That's where the (i)chip maps the Tachyon Address Space.
3831 fi->g.tachyon_base = (u_long)fi->g.mem_base + TACHYON_OFFSET + ( pci_maddr & ~PAGE_MASK );
3832 DPRINTK("fi->g.tachyon_base = %x", (u_int)fi->g.tachyon_base);
3833 if (fi->g.mem_base == NULL) {
3834 printk("iph5526.c : ioremap failed!!!\n");
3835 scsi_unregister(host);
3836 kfree(fc[count]);
3837 fc[count] = NULL;
3838 continue;
3840 DPRINTK("IRQ1 = %d\n", pci_irq_line);
3841 printk(version);
3842 fi->base_addr = (long) pdev;
3844 if (pci_irq_line) {
3845 int irqval = 0;
3846 /* Found it, get IRQ.
3848 irqval = request_irq(pci_irq_line, &tachyon_interrupt, pci_irq_line ? SA_SHIRQ : 0, fi->name, host);
3849 if (irqval) {
3850 printk("iph5526.c : Unable to get IRQ %d (irqval = %d).\n", pci_irq_line, irqval);
3851 scsi_unregister(host);
3852 kfree(fc[count]);
3853 fc[count] = NULL;
3854 continue;
3856 host->irq = fi->irq = pci_irq_line;
3857 pci_irq_line = 0;
3858 fi->clone_id = clone_list[i].vendor_id;
3861 if (!initialize_register_pointers(fi) || !tachyon_init(fi)) {
3862 printk("iph5526.c: TACHYON initialization failed for card # %d!!!\n", count + 1);
3863 free_irq(host->irq, host);
3864 scsi_unregister(host);
3865 if (fi)
3866 clean_up_memory(fi);
3867 kfree(fc[count]);
3868 fc[count] = NULL;
3869 break;
3871 DPRINTK1("Fibre Channel card initialized");
3872 /* Wait for the Link to come up and the login process
3873 * to complete.
3875 for(timeout = jiffies + 10*HZ; (timeout > jiffies) && ((fi->g.link_up == FALSE) || (fi->g.port_discovery == TRUE) || (fi->g.explore_fabric == TRUE) || (fi->g.perform_adisc == TRUE));)
3876 barrier();
3878 count++;
3879 no_of_hosts++;
3881 DPRINTK1("no_of_hosts = %d",no_of_hosts);
3883 /* This is to make sure that the ACC to the PRLI comes in
3884 * for the last ALPA.
3886 mdelay(1000); /* Ugly! Let the Gods forgive me */
3888 DPRINTK1("leaving iph5526_detect\n");
3889 return no_of_hosts;
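/* Standard translated BIOS geometry: 64 heads x 32 sectors (2048 sectors,
 * i.e. 1 MB per cylinder), so cylinders = capacity >> 11. Disks that would
 * need more than 1024 cylinders (over roughly 1 GB) are reported as 255 x 63
 * instead. For example, an 8388608-sector (4 GB) disk gives 8388608 >> 11 =
 * 4096 cylinders, so it falls back to 255 x 63 = 16065 sectors per cylinder,
 * about 522 cylinders.
 */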
3893 int iph5526_biosparam(Disk * disk, kdev_t n, int ip[])
3895 int size = disk->capacity;
3896 ip[0] = 64;
3897 ip[1] = 32;
3898 ip[2] = size >> 11;
3899 if (ip[2] > 1024) {
3900 ip[0] = 255;
3901 ip[1] = 63;
3902 ip[2] = size / (ip[0] * ip[1]);
3904 return 0;
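/* iph5526_queuecommand() maps the midlayer queue tag onto an FCP task
 * attribute (commands without a recognized tag get an ORDERED tag forced
 * roughly every 5 seconds per target, SIMPLE otherwise), fills in the
 * FCP_CMND (LUN, CDB, data length), grabs a free SCSI OX_ID and records the
 * Scsi_Cmnd in cmnd_handler[] so the completion interrupt can find it, sets
 * up the SEST entry via add_to_sest() and finally fires the command with
 * tx_exchange(). Commands to a target that cannot be resolved are
 * deliberately sent to a bogus AL_PA so they complete through the normal
 * timeout path.
 */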
3907 int iph5526_queuecommand(Scsi_Cmnd *Cmnd, void (*done) (Scsi_Cmnd *))
3909 int int_required = 0;
3910 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_COMMAND;
3911 u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
3912 u_int frame_class = Cmnd->target;
3913 u_short ox_id = OX_ID_FIRST_SEQUENCE;
3914 struct Scsi_Host *host = Cmnd->host;
3915 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata*)host->hostdata;
3916 struct fc_info *fi = hostdata->fi;
3917 struct fc_node_info *q;
3918 u_long flags;
3919 ENTER("iph5526_queuecommand");
3921 spin_lock_irqsave(&fi->fc_lock, flags);
3922 Cmnd->scsi_done = done;
3924 if (Cmnd->device->tagged_supported) {
3925 switch(Cmnd->tag) {
3926 case SIMPLE_QUEUE_TAG:
3927 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_SIMPLE;
3928 break;
3929 case HEAD_OF_QUEUE_TAG:
3930 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_HEAD_OF_Q;
3931 break;
3932 case ORDERED_QUEUE_TAG:
3933 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_ORDERED;
3934 break;
3935 default:
3936 if ((jiffies - hostdata->tag_ages[Cmnd->target]) > (5 * HZ)) {
3937 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_ORDERED;
3938 hostdata->tag_ages[Cmnd->target] = jiffies;
3940 else
3941 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_SIMPLE;
3942 break;
3945 /* else
3946 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_UNTAGGED; */
3949 hostdata->cmnd.fcp_addr[3] = 0;
3950 hostdata->cmnd.fcp_addr[2] = 0;
3951 hostdata->cmnd.fcp_addr[1] = 0;
3952 hostdata->cmnd.fcp_addr[0] = htons(Cmnd->lun);
3954 memcpy(&hostdata->cmnd.fcp_cdb, Cmnd->cmnd, Cmnd->cmd_len);
3955 hostdata->cmnd.fcp_data_len = htonl(Cmnd->request_bufflen);
3957 /* Get an unused OX_ID. We could have pending commands.
3959 if (get_scsi_oxid(fi))
3960 return 1;
3961 fi->q.free_scsi_oxid[fi->g.scsi_oxid] = OXID_INUSE;
3963 /* Maintain a handler so that we can associate the done() function
3964 * on completion of the SCSI command.
3966 hostdata->cmnd_handler[fi->g.scsi_oxid] = Cmnd;
3968 switch(Cmnd->cmnd[0]) {
3969 case WRITE_6:
3970 case WRITE_10:
3971 case WRITE_12:
3972 fi->g.type_of_frame = FC_SCSI_WRITE;
3973 hostdata->cmnd.fcp_cntl = htonl(FCP_CNTL_WRITE | hostdata->cmnd.fcp_cntl);
3974 break;
3975 default:
3976 fi->g.type_of_frame = FC_SCSI_READ;
3977 hostdata->cmnd.fcp_cntl = htonl(FCP_CNTL_READ | hostdata->cmnd.fcp_cntl);
3980 memcpy(fi->q.ptr_fcp_cmnd[fi->q.fcp_cmnd_indx], &(hostdata->cmnd), sizeof(fcp_cmd));
3982 q = resolve_target(fi, Cmnd->target);
3984 if (q == NULL) {
3985 u_int bad_id = fi->g.my_ddaa | 0xFE;
3986 /* We transmit to a non-existent AL_PA so that the "done"
3987 * function can be called while receiving the interrupt
3988 * due to a Timeout for a bad AL_PA. In a PTP configuration,
3989 * the int_required field is set, since there is no notion
3990 * of AL_PAs. This approach sucks, but works alright!
3992 if (fi->g.ptp_up == TRUE)
3993 int_required = 1;
3994 tx_exchange(fi, (char *)(&(hostdata->cmnd)), sizeof(fcp_cmd), r_ctl, type, bad_id, fi->g.my_mtu, int_required, ox_id, FC_SCSI_BAD_TARGET);
3995 spin_unlock_irqrestore(&fi->fc_lock, flags);
3996 DPRINTK1("Target ID %x not present", Cmnd->target);
3997 return 0;
3999 if (q->login == LOGIN_COMPLETED) {
4000 if (add_to_sest(fi, Cmnd, q)) {
4001 DPRINTK1("add_to_sest() failed.");
4002 spin_unlock_irqrestore(&fi->fc_lock, flags);
4003 return 0;
4005 tx_exchange(fi, (char *)(fi->q.ptr_fcp_cmnd[fi->q.fcp_cmnd_indx]), sizeof(fcp_cmd), r_ctl, type, q->d_id, q->mtu, int_required, ox_id, frame_class << 16);
4006 update_FCP_CMND_indx(fi);
4008 spin_unlock_irqrestore(&fi->fc_lock, flags);
4009 /* If q != NULL, then we have a SCSI Target.
4010 * If q->login != LOGIN_COMPLETED, then that device could be
4011 * offline temporarily. So we let the command time out.
4013 LEAVE("iph5526_queuecommand");
4014 return 0;
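/* iph5526_abort() recovers the OX_ID of the command from cmnd_handler[]
 * (reads carry SCSI_READ_BIT), invalidates the exchange through
 * abort_exchange() and, if the target is still logged in and the exchange
 * was active, transmits an ABTS; the rest of the cleanup happens when the
 * BA_ACC comes back. Otherwise only the SEST entry is invalidated.
 */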
4017 int iph5526_abort(Scsi_Cmnd *Cmnd)
4019 struct Scsi_Host *host = Cmnd->host;
4020 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
4021 struct fc_info *fi = hostdata->fi;
4022 struct fc_node_info *q;
4023 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_COMMAND;
4024 u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
4025 u_short ox_id = OX_ID_FIRST_SEQUENCE;
4026 int int_required = 1, i, abort_status = FALSE;
4027 u_long flags;
4029 ENTER("iph5526_abort");
4031 spin_lock_irqsave(&fi->fc_lock, flags);
4033 q = resolve_target(fi, Cmnd->target);
4034 if (q == NULL) {
4035 u_int bad_id = fi->g.my_ddaa | 0xFE;
4036 /* This should not happen as we should always be able to
4037 * resolve a target id. But, just in case...
4038 * We transmit to a non-existent AL_PA so that the done
4039 * function can be called while receiving the interrupt
4040 * for a bad AL_PA.
4042 DPRINTK1("Unresolved Target ID!");
4043 tx_exchange(fi, (char *)(&(hostdata->cmnd)), sizeof(fcp_cmd), r_ctl, type, bad_id, fi->g.my_mtu, int_required, ox_id, FC_SCSI_BAD_TARGET);
4044 DPRINTK1("Target ID %x not present", Cmnd->target);
4045 spin_unlock_irqrestore(&fi->fc_lock, flags);
4046 return FAILED;
4049 /* If q != NULL, then we have a SCSI Target. If
4050 * q->login != LOGIN_COMPLETED, then that device could
4051 * be offline temporarily. So we let the command time out.
4054 /* Get the OX_ID for the Command to be aborted.
4056 for (i = 0; i <= MAX_SCSI_XID; i++) {
4057 if (hostdata->cmnd_handler[i] == Cmnd) {
4058 hostdata->cmnd_handler[i] = NULL;
4059 ox_id = i;
4060 break;
4063 if (i > MAX_SCSI_XID) {
4064 T_MSG("Command could not be resolved to OX_ID");
4065 spin_unlock_irqrestore(&fi->fc_lock, flags);
4066 return FAILED;
4069 switch(Cmnd->cmnd[0]) {
4070 case WRITE_6:
4071 case WRITE_10:
4072 case WRITE_12:
4073 break;
4074 default:
4075 ox_id |= SCSI_READ_BIT;
4077 abort_status = abort_exchange(fi, ox_id);
4079 if ((q->login == LOGIN_COMPLETED) && (abort_status == TRUE)) {
4080 /* Then, transmit an ABTS to the target. The rest
4081 * is done when the BA_ACC is received for the ABTS.
4083 tx_abts(fi, q->d_id, ox_id);
4085 else {
4086 u_int STE_bit;
4087 u_short x_id;
4088 /* Invalidate resources for that Exchange.
4090 x_id = ox_id & MAX_SCSI_XID;
4091 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4092 if (STE_bit & SEST_V) {
4093 *(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
4094 invalidate_SEST_entry(fi, ox_id);
4098 LEAVE("iph5526_abort");
4099 spin_unlock_irqrestore(&fi->fc_lock, flags);
4100 return SUCCESS;
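/* abort_exchange() invalidates the SEST entry for an active exchange. For
 * inbound (read) exchanges the entry is cleared and Tachyon's on-chip SEST
 * cache is flushed through the flush-OX_ID register; flush_tachyon_cache()
 * first drains the inbound FIFO with the frame manager in host control, or
 * takes the chip offline and re-initializes it as an N_Port in
 * point-to-point mode. For outbound (write) exchanges the buffer length in
 * the first EDB is zeroed so nothing more gets sent. Returns TRUE if the
 * exchange was still valid, FALSE otherwise.
 */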
4103 static int abort_exchange(struct fc_info *fi, u_short ox_id)
4105 u_short x_id;
4106 volatile u_int flush_SEST, STE_bit;
4107 x_id = ox_id & MAX_SCSI_XID;
4108 DPRINTK1("Aborting Exchange %x", ox_id);
4110 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4111 /* Is the Exchange still active?.
4113 if (STE_bit & SEST_V) {
4114 if (ox_id & SCSI_READ_BIT) {
4115 /* If the Exchange to be aborted is Inbound,
4116 * Flush the SEST Entry from Tachyon's Cache.
4118 *(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
4119 flush_tachyon_cache(fi, ox_id);
4120 flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
4121 while ((flush_SEST & 0x80000000) != 0)
4122 flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
4123 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4124 while ((STE_bit & 0x80000000) != 0)
4125 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4126 flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
4127 invalidate_SEST_entry(fi, ox_id);
4129 else {
4130 int i;
4131 u_int *ptr_edb;
4132 /* For In-Order Reassembly, the following is done:
4133 * First, write zero as the buffer length in the EDB.
4135 ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
4136 for (i = 0; i < EDB_LEN; i++)
4137 if (fi->q.ptr_edb[i] == ptr_edb)
4138 break;
4139 if (i < EDB_LEN)
4140 *ptr_edb = *ptr_edb & 0x0000FFFF;
4141 else
4142 T_MSG("EDB not found while clearing in abort_exchange()");
4144 DPRINTK1("Exchange %x invalidated", ox_id);
4145 return TRUE;
4147 else {
4148 DPRINTK1("SEST Entry for exchange %x not valid", ox_id);
4149 return FALSE;
4153 static void flush_tachyon_cache(struct fc_info *fi, u_short ox_id)
4155 volatile u_int tachyon_status;
4156 if (fi->g.loop_up == TRUE) {
4157 writel(HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
4158 /* Make sure that the Inbound FIFO is empty.
4160 do {
4161 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
4162 udelay(200);
4163 }while ((tachyon_status & RECEIVE_FIFO_EMPTY) == 0);
4164 /* Ok. Go ahead and flushhhhhhhhh!
4166 writel(0x80000000 | ox_id, fi->t_r.ptr_tach_flush_oxid_reg);
4167 writel(EXIT_HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
4168 return;
4170 if (fi->g.ptp_up == TRUE) {
4171 take_tachyon_offline(fi);
4172 /* Make sure that the Inbound FIFO is empty.
4174 do {
4175 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
4176 udelay(200);
4177 }while ((tachyon_status & RECEIVE_FIFO_EMPTY) == 0);
4178 writel(0x80000000 | ox_id, fi->t_r.ptr_tach_flush_oxid_reg);
4179 /* Write the Initialize command to the FM Control reg.
4181 fi->g.n_port_try = TRUE;
4182 DPRINTK1("In abort_exchange, TACHYON initializing as N_Port...\n");
4183 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
4187 static struct fc_node_info *resolve_target(struct fc_info *fi, u_char target)
4189 struct fc_node_info *temp = fi->node_info_list;
4190 while(temp != NULL)
4191 if (temp->target_id == target) {
4192 if ((temp->scsi == TARGET) && (temp->login == LOGIN_COMPLETED))
4193 return temp;
4194 else {
4195 if (temp->login != LOGIN_COMPLETED) {
4196 /* The Target is not currently logged in.
4197 * It could be a Target on the Local Loop or
4198 * on a Remote Loop connected through a switch.
4199 * In either case, we will know whenever the Target
4200 * comes On-Line again. We let the command
4201 * time out so that it gets retried.
4203 T_MSG("Target %d not logged in.", temp->target_id);
4204 tx_logi(fi, ELS_PLOGI, temp->d_id);
4205 return temp;
4207 else {
4208 if (temp->scsi != TARGET) {
4209 /* For some reason, we did not get a response to
4210 * PRLI. Let's try it again...
4212 DPRINTK1("Node not PRLIied. Txing PRLI...");
4213 tx_prli(fi, ELS_PRLI, temp->d_id, OX_ID_FIRST_SEQUENCE);
4216 return temp;
4219 else
4220 temp = temp->next;
4221 return NULL;
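/* add_to_sest() prepares the SEST entry for a SCSI command. Reads get an
 * inbound entry plus an SDB: a table holding the bus address of every
 * SEST_BUFFER_SIZE chunk of the request buffer (or of each scatter-gather
 * element), terminated by 0x1; the SDB slot index is stashed in the entry's
 * scratch pad so the buffers can be released on completion. Writes get an
 * outbound entry, their own Tachyon header and a run of contiguous EDBs,
 * with the last EDB marked EDB_END. A request that would need more than 512
 * SDB pointers is refused.
 */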
4224 static int add_to_sest(struct fc_info *fi, Scsi_Cmnd *Cmnd, struct fc_node_info *ni)
4226 /* we have at least 1 buffer, the terminator */
4227 int no_of_sdb_buffers = 1, i;
4228 int no_of_edb_buffers = 0;
4229 u_int *req_buffer = (u_int *)Cmnd->request_buffer;
4230 u_int *ptr_sdb = NULL;
4231 struct scatterlist *sl1, *sl2 = NULL;
4232 int no_of_sg = 0;
4234 switch(fi->g.type_of_frame) {
4235 case FC_SCSI_READ:
4236 fi->g.inb_sest_entry.flags_and_byte_offset = htonl(INB_SEST_VED);
4237 fi->g.inb_sest_entry.byte_count = 0;
4238 fi->g.inb_sest_entry.no_of_recvd_frames = 0;
4239 fi->g.inb_sest_entry.no_of_expected_frames = 0;
4240 fi->g.inb_sest_entry.last_fctl = 0;
4242 if (Cmnd->use_sg) {
4243 no_of_sg = Cmnd->use_sg;
4244 sl1 = sl2 = (struct scatterlist *)Cmnd->request_buffer;
4245 for (i = 0; i < no_of_sg; i++) {
4246 no_of_sdb_buffers += sl1->length / SEST_BUFFER_SIZE;
4247 if (sl1->length % SEST_BUFFER_SIZE)
4248 no_of_sdb_buffers++;
4249 sl1++;
4252 else {
4253 no_of_sdb_buffers += Cmnd->request_bufflen / SEST_BUFFER_SIZE;
4254 if (Cmnd->request_bufflen % SEST_BUFFER_SIZE)
4255 no_of_sdb_buffers++;
4256 } /* if !use_sg */
4258 /* We are working with the premise that at the max we would
4259 * get a scatter-gather buffer containing 63 buffers
4260 * of size 1024 bytes each. Is it a _bad_ assumption?
4262 if (no_of_sdb_buffers > 512) {
4263 T_MSG("Number of SDB buffers needed = %d", no_of_sdb_buffers);
4264 T_MSG("Disable Scatter-Gather!!!");
4265 return 1;
4269 /* Store it in the sdb_table so that we can retrieve that
4270 * free up the memory when the Read Command completes.
4272 if (get_free_SDB(fi))
4273 return 1;
4274 ptr_sdb = fi->q.ptr_sdb_slot[fi->q.sdb_indx];
4275 fi->q.sdb_slot_status[fi->q.sdb_indx] = SDB_BUSY;
4276 fi->g.inb_sest_entry.sdb_address = htonl(virt_to_bus(ptr_sdb));
4278 if (Cmnd->use_sg) {
4279 int count = 0, j;
4280 for(i = 0; i < no_of_sg; i++) {
4281 char *addr_ptr = sl2->address;
4282 count = sl2->length / SEST_BUFFER_SIZE;
4283 if (sl2->length % SEST_BUFFER_SIZE)
4284 count++;
4285 for (j = 0; j < count; j++) {
4286 *(ptr_sdb) = htonl(virt_to_bus(addr_ptr));
4287 addr_ptr += SEST_BUFFER_SIZE;
4288 ptr_sdb++;
4290 count = 0;
4291 sl2++;
4294 else {
4295 for (i = 0; i < no_of_sdb_buffers - 1; i++) {
4296 *(ptr_sdb) = htonl(virt_to_bus(req_buffer));
4297 req_buffer += SEST_BUFFER_SIZE/4;
4298 ptr_sdb++;
4301 *(ptr_sdb) = htonl(0x1); /* Terminator */
4303 /* The scratch pad is used to hold the index into the SDB.
4305 fi->g.inb_sest_entry.scratch_pad = fi->q.sdb_indx;
4306 fi->g.inb_sest_entry.expected_ro = 0;
4307 fi->g.inb_sest_entry.buffer_index = 0;
4308 fi->g.inb_sest_entry.buffer_offset = 0;
4309 memcpy(fi->q.ptr_sest[fi->g.scsi_oxid], &fi->g.inb_sest_entry, sizeof(INB_SEST_ENTRY));
4310 break;
4311 case FC_SCSI_WRITE:
4312 fi->g.outb_sest_entry.flags_and_did = htonl(OUTB_SEST_VED | ni->d_id);
4313 fi->g.outb_sest_entry.max_frame_len = htons(ni->mtu << 4);
4314 fi->g.outb_sest_entry.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | ODB_NO_INT | ODB_NO_COMP);
4315 fi->g.outb_sest_entry.total_seq_length = INV_SEQ_LEN;
4316 fi->g.outb_sest_entry.link = htons(OUTB_SEST_LINK);
4317 fi->g.outb_sest_entry.transaction_id = htonl(fi->g.scsi_oxid);
4318 fi->g.outb_sest_entry.seq_id = fi->g.seq_id;
4319 fi->g.outb_sest_entry.reserved = 0x0;
4320 fi->g.outb_sest_entry.header_length = htons(TACHYON_HEADER_LEN);
4323 u_char df_ctl = 0;
4324 u_short rx_id = RX_ID_FIRST_SEQUENCE;
4325 u_int r_ctl = FC4_DEVICE_DATA | SOLICITED_DATA;
4326 u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
4327 /* Multi Frame Sequence ? If yes, set RO bit.
4329 if (Cmnd->request_bufflen > ni->mtu)
4330 type |= RELATIVE_OFF_PRESENT;
4331 build_tachyon_header(fi, fi->g.my_id, r_ctl, ni->d_id, type, fi->g.seq_id, df_ctl, fi->g.scsi_oxid, rx_id, NULL);
4332 if (get_free_header(fi) || get_free_EDB(fi))
4333 return 1;
4334 memcpy(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx], &(fi->g.tach_header), TACHYON_HEADER_LEN);
4335 fi->g.outb_sest_entry.header_address = htonl(virt_to_bus(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx]));
4336 update_tachyon_header_indx(fi);
4339 if (Cmnd->use_sg) {
4340 no_of_sg = Cmnd->use_sg;
4341 sl1 = sl2 = (struct scatterlist *)Cmnd->request_buffer;
4342 for (i = 0; i < no_of_sg; i++) {
4343 no_of_edb_buffers += sl1->length / SEST_BUFFER_SIZE;
4344 if (sl1->length % SEST_BUFFER_SIZE)
4345 no_of_edb_buffers++;
4346 sl1++;
4349 else {
4350 no_of_edb_buffers += Cmnd->request_bufflen / SEST_BUFFER_SIZE;
4351 if (Cmnd->request_bufflen % SEST_BUFFER_SIZE)
4352 no_of_edb_buffers++;
4353 } /* if !use_sg */
4356 /* We need "no_of_edb_buffers" _contiguous_ EDBs
4357 * that are FREE. Check for that first.
4359 for (i = 0; i < no_of_edb_buffers; i++) {
4360 int j;
4361 if ((fi->q.edb_buffer_indx + no_of_edb_buffers) >= EDB_LEN)
4362 fi->q.edb_buffer_indx = 0;
4363 if (fi->q.free_edb_list[fi->q.edb_buffer_indx + i] != EDB_FREE) {
4364 for (j = 0; j < i; j++)
4365 update_EDB_indx(fi);
4366 if (get_free_EDB(fi))
4367 return 1;
4368 i = 0;
4372 /* We have enough FREE EDBs.
4374 if (Cmnd->use_sg) {
4375 fi->g.outb_sest_entry.edb_address = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
4376 sl1 = (struct scatterlist *)Cmnd->request_buffer;
4377 for(i = 0; i < no_of_sg; i++) {
4378 int count = 0, j;
4379 count = sl1->length / SEST_BUFFER_SIZE;
4380 for (j = 0; j < count; j++) {
4381 build_EDB(fi, (char *)sl1->address, 0, SEST_BUFFER_SIZE);
4382 memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4383 /* Mark this EDB as being in use */
4384 fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4385 /* We have already made sure that we have enough
4386 * free EDBs that are contiguous. So this is
4387 * safe.
4389 update_EDB_indx(fi);
4390 sl1->address += SEST_BUFFER_SIZE;
4392 /* Just in case it's not a multiple of
4393 * SEST_BUFFER_SIZE bytes.
4395 if (sl1->length % SEST_BUFFER_SIZE) {
4396 build_EDB(fi, (char *)sl1->address, 0, sl1->length % SEST_BUFFER_SIZE);
4397 memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4398 fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4399 update_EDB_indx(fi);
4401 sl1++;
4403 /* The last EDB is special. It needs the "end bit" to
4404 * be set.
4406 *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) = *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) | ntohs(EDB_END);
4408 else {
4409 int count = 0, j;
4410 fi->g.outb_sest_entry.edb_address = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
4411 count = Cmnd->request_bufflen / SEST_BUFFER_SIZE;
4412 for (j = 0; j < count; j++) {
4413 build_EDB(fi, (char *)req_buffer, 0, SEST_BUFFER_SIZE);
4414 memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4415 /* Mark this EDB as being in use */
4416 fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4417 /* We have already made sure that we have enough
4418 * free EDBs that are contiguous. So this is
4419 * safe.
4421 update_EDB_indx(fi);
4422 req_buffer += SEST_BUFFER_SIZE;
4424 /* Just in case it's not a multiple of
4425 * SEST_BUFFER_SIZE bytes.
4427 if (Cmnd->request_bufflen % SEST_BUFFER_SIZE) {
4428 build_EDB(fi, (char *)req_buffer, EDB_END, Cmnd->request_bufflen % SEST_BUFFER_SIZE);
4429 memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4430 fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4431 update_EDB_indx(fi);
4433 else {
4434 /* Mark the last EDB as the "end edb".
4436 *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) = *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) | htons(EDB_END);
4440 /* Finally we have something to send!.
4442 memcpy(fi->q.ptr_sest[fi->g.scsi_oxid], &fi->g.outb_sest_entry, sizeof(OUTB_SEST_ENTRY));
4443 break;
4445 return 0;
4448 static void update_FCP_CMND_indx(struct fc_info *fi)
4450 fi->q.fcp_cmnd_indx++;
4451 if (fi->q.fcp_cmnd_indx == NO_OF_FCP_CMNDS)
4452 fi->q.fcp_cmnd_indx = 0;
4455 static int get_scsi_oxid(struct fc_info *fi)
4457 u_short initial_oxid = fi->g.scsi_oxid;
4458 /* Check if the OX_ID is in use.
4459 * We could have an outstanding SCSI command.
4461 while (fi->q.free_scsi_oxid[fi->g.scsi_oxid] != OXID_AVAILABLE) {
4462 update_scsi_oxid(fi);
4463 if (fi->g.scsi_oxid == initial_oxid) {
4464 T_MSG("No free OX_IDs available")
4465 reset_tachyon(fi, SOFTWARE_RESET);
4466 return 1;
4469 return 0;
4472 static void update_scsi_oxid(struct fc_info *fi)
4474 fi->g.scsi_oxid++;
4475 if (fi->g.scsi_oxid == (MAX_SCSI_XID + 1))
4476 fi->g.scsi_oxid = 0;
4479 static int get_free_SDB(struct fc_info *fi)
4481 unsigned int initial_indx = fi->q.sdb_indx;
4482 /* Check if the SDB is in use.
4483 * We could have an outstanding SCSI Read command.
4484 * We should find a free slot as we can queue a
4485 * maximum of 32 SCSI commands only.
4487 while (fi->q.sdb_slot_status[fi->q.sdb_indx] != SDB_FREE) {
4488 update_SDB_indx(fi);
4489 if (fi->q.sdb_indx == initial_indx) {
4490 T_MSG("No free SDB buffers available")
4491 reset_tachyon(fi, SOFTWARE_RESET);
4492 return 1;
4495 return 0;
4498 static void update_SDB_indx(struct fc_info *fi)
4500 fi->q.sdb_indx++;
4501 if (fi->q.sdb_indx == NO_OF_SDB_ENTRIES)
4502 fi->q.sdb_indx = 0;
4505 int iph5526_release(struct Scsi_Host *host)
4507 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata*)host->hostdata;
4508 struct fc_info *fi = hostdata->fi;
4509 free_irq(host->irq, host);
4510 iounmap(fi->g.mem_base);
4511 return 0;
4514 const char *iph5526_info(struct Scsi_Host *host)
4516 static char buf[80];
4517 sprintf(buf, "Interphase 5526 Fibre Channel PCI SCSI Adapter using IRQ %d\n", host->irq);
4518 return buf;
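/* When built as a module, init_module() first registers the SCSI host
 * template (which runs iph5526_detect() and fills in fc[]), then creates one
 * Fibre Channel net_device per detected card with init_fcdev() and
 * register_fcdev() for the IP side. cleanup_module() takes each card
 * offline, unregisters its net_device and frees the per-card memory before
 * dropping the SCSI registration.
 */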
4521 #ifdef MODULE
4523 #define NAMELEN 8 /* # of chars for storing dev->name */
4525 static struct net_device *dev_fc[MAX_FC_CARDS];
4527 static int io = 0;
4528 static int irq = 0;
4529 static int bad = 0; /* 0xbad = bad sig or no reset ack */
4530 static int scsi_registered;
4533 int init_module(void)
4535 int i = 0;
4537 driver_template.module = &__this_module;
4538 scsi_register_module(MODULE_SCSI_HA, &driver_template);
4539 if (driver_template.present)
4540 scsi_registered = TRUE;
4541 else {
4542 printk("iph5526: SCSI registration failed!!!\n");
4543 scsi_registered = FALSE;
4544 scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
4547 while(fc[i] != NULL) {
4548 dev_fc[i] = NULL;
4549 dev_fc[i] = init_fcdev(dev_fc[i], 0);
4550 if (dev_fc[i] == NULL) {
4551 printk("iph5526.c: init_fcdev failed for card #%d\n", i+1);
4552 break;
4554 dev_fc[i]->irq = irq;
4555 dev_fc[i]->mem_end = bad;
4556 dev_fc[i]->base_addr = io;
4557 dev_fc[i]->init = iph5526_probe;
4558 dev_fc[i]->priv = fc[i];
4559 fc[i]->dev = dev_fc[i];
4560 if (register_fcdev(dev_fc[i]) != 0) {
4561 kfree_s(dev_fc[i], sizeof(struct net_device));
4562 dev_fc[i] = NULL;
4563 if (i == 0) {
4564 printk("iph5526.c: IP registration failed!!!\n");
4565 return -ENODEV;
4568 i++;
4570 if (i == 0)
4571 return -ENODEV;
4573 return 0;
4576 void cleanup_module(void)
4578 int i = 0;
4579 while(fc[i] != NULL) {
4580 struct net_device *dev = fc[i]->dev;
4581 void *priv = dev->priv;
4582 fc[i]->g.dont_init = TRUE;
4583 take_tachyon_offline(fc[i]);
4584 unregister_fcdev(dev);
4585 clean_up_memory(fc[i]);
4586 if (dev->priv)
4587 kfree(priv);
4588 kfree(dev);
4589 dev = NULL;
4590 i++;
4592 if (scsi_registered == TRUE)
4593 scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
4595 #endif /* MODULE */
4597 void clean_up_memory(struct fc_info *fi)
4599 int i,j;
4600 ENTER("clean_up_memory");
4601 if (fi->q.ptr_mfsbq_base)
4602 free_pages((u_long)bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base))), 5);
4603 DPRINTK("after kfree2");
4604 for (i = 0; i < SFSBQ_LENGTH; i++)
4605 for (j = 0; j < NO_OF_ENTRIES; j++)
4606 if (fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES + j])
4607 kfree(fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES + j]);
4608 DPRINTK("after kfree1");
4609 if (fi->q.ptr_ocq_base)
4610 free_page((u_long)fi->q.ptr_ocq_base);
4611 if (fi->q.ptr_imq_base)
4612 free_page((u_long)fi->q.ptr_imq_base);
4613 if (fi->q.ptr_mfsbq_base)
4614 free_page((u_long)fi->q.ptr_mfsbq_base);
4615 if (fi->q.ptr_sfsbq_base)
4616 free_page((u_long)fi->q.ptr_sfsbq_base);
4617 if (fi->q.ptr_edb_base)
4618 free_pages((u_long)fi->q.ptr_edb_base, 5);
4619 if (fi->q.ptr_sest_base)
4620 free_pages((u_long)fi->q.ptr_sest_base, 5);
4621 if (fi->q.ptr_tachyon_header_base)
4622 free_page((u_long)fi->q.ptr_tachyon_header_base);
4623 if (fi->q.ptr_sdb_base)
4624 free_pages((u_long)fi->q.ptr_sdb_base, 5);
4625 if (fi->q.ptr_fcp_cmnd_base)
4626 free_page((u_long)fi->q.ptr_fcp_cmnd_base);
4627 DPRINTK("after free_pages");
4628 if (fi->q.ptr_host_ocq_cons_indx)
4629 kfree(fi->q.ptr_host_ocq_cons_indx);
4630 if (fi->q.ptr_host_hpcq_cons_indx)
4631 kfree(fi->q.ptr_host_hpcq_cons_indx);
4632 if (fi->q.ptr_host_imq_prod_indx)
4633 kfree(fi->q.ptr_host_imq_prod_indx);
4634 DPRINTK("after kfree3");
4635 while (fi->node_info_list) {
4636 struct fc_node_info *temp_list = fi->node_info_list;
4637 fi->node_info_list = fi->node_info_list->next;
4638 kfree(temp_list);
4640 while (fi->ox_id_list) {
4641 struct ox_id_els_map *temp = fi->ox_id_list;
4642 fi->ox_id_list = fi->ox_id_list->next;
4643 kfree(temp);
4645 LEAVE("clean_up_memory");
4648 static int initialize_register_pointers(struct fc_info *fi)
4650 ENTER("initialize_register_pointers");
4651 if(fi->g.tachyon_base == 0)
4652 return -ENOMEM;
4654 fi->i_r.ptr_ichip_hw_control_reg = ICHIP_HW_CONTROL_REG_OFF + fi->g.tachyon_base;
4655 fi->i_r.ptr_ichip_hw_status_reg = ICHIP_HW_STATUS_REG_OFF + fi->g.tachyon_base;
4656 fi->i_r.ptr_ichip_hw_addr_mask_reg = ICHIP_HW_ADDR_MASK_REG_OFF + fi->g.tachyon_base;
4657 fi->t_r.ptr_ocq_base_reg = OCQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4658 fi->t_r.ptr_ocq_len_reg = OCQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4659 fi->t_r.ptr_ocq_prod_indx_reg = OCQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4660 fi->t_r.ptr_ocq_cons_indx_reg = OCQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4661 fi->t_r.ptr_imq_base_reg = IMQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4662 fi->t_r.ptr_imq_len_reg = IMQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4663 fi->t_r.ptr_imq_cons_indx_reg = IMQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4664 fi->t_r.ptr_imq_prod_indx_reg = IMQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4665 fi->t_r.ptr_mfsbq_base_reg = MFSBQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4666 fi->t_r.ptr_mfsbq_len_reg = MFSBQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4667 fi->t_r.ptr_mfsbq_prod_reg = MFSBQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4668 fi->t_r.ptr_mfsbq_cons_reg = MFSBQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4669 fi->t_r.ptr_mfsbuff_len_reg = MFS_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4670 fi->t_r.ptr_sfsbq_base_reg = SFSBQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4671 fi->t_r.ptr_sfsbq_len_reg = SFSBQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4672 fi->t_r.ptr_sfsbq_prod_reg = SFSBQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4673 fi->t_r.ptr_sfsbq_cons_reg = SFSBQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4674 fi->t_r.ptr_sfsbuff_len_reg = SFS_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4675 fi->t_r.ptr_sest_base_reg = SEST_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4676 fi->t_r.ptr_sest_len_reg = SEST_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4677 fi->t_r.ptr_scsibuff_len_reg = SCSI_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4678 fi->t_r.ptr_tach_config_reg = TACHYON_CONFIG_REGISTER_OFFSET + fi->g.tachyon_base;
4679 fi->t_r.ptr_tach_control_reg = TACHYON_CONTROL_REGISTER_OFFSET + fi->g.tachyon_base;
4680 fi->t_r.ptr_tach_status_reg = TACHYON_STATUS_REGISTER_OFFSET + fi->g.tachyon_base;
4681 fi->t_r.ptr_tach_flush_oxid_reg = TACHYON_FLUSH_SEST_REGISTER_OFFSET + fi->g.tachyon_base;
4682 fi->t_r.ptr_fm_config_reg = FMGR_CONFIG_REGISTER_OFFSET + fi->g.tachyon_base;
4683 fi->t_r.ptr_fm_control_reg = FMGR_CONTROL_REGISTER_OFFSET + fi->g.tachyon_base;
4684 fi->t_r.ptr_fm_status_reg = FMGR_STATUS_REGISTER_OFFSET + fi->g.tachyon_base;
4685 fi->t_r.ptr_fm_tov_reg = FMGR_TIMER_REGISTER_OFFSET + fi->g.tachyon_base;
4686 fi->t_r.ptr_fm_wwn_hi_reg = FMGR_WWN_HI_REGISTER_OFFSET + fi->g.tachyon_base;
4687 fi->t_r.ptr_fm_wwn_low_reg = FMGR_WWN_LO_REGISTER_OFFSET + fi->g.tachyon_base;
4688 fi->t_r.ptr_fm_rx_al_pa_reg = FMGR_RCVD_ALPA_REGISTER_OFFSET + fi->g.tachyon_base;
4690 LEAVE("initialize_register_pointers");
4691 return 1;
4697 * Local variables:
4698 * compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I/usr/src/linux/net/tcp -c iph5526.c"
4699 * version-control: t
4700 * kept-new-versions: 5
4701 * End: