1 /**********************************************************************
2 * iph5526.c: IP/SCSI driver for the Interphase 5526 PCI Fibre Channel
3 * Card.
4 * Copyright (C) 1999 Vineet M Abraham <vma@iol.unh.edu>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *********************************************************************/
16 /**********************************************************************
17 Log:
18 Vineet M Abraham
19 02.12.99 Support multiple cards.
20 03.15.99 Added Fabric support.
21 04.04.99 Added N_Port support.
22 04.15.99 Added SCSI support.
23 06.18.99 Added ABTS Protocol.
24 06.24.99 Fixed data corruption when multiple XFER_RDYs are received.
25 07.07.99 Can be loaded as part of the Kernel. Changed semaphores. Added
26 more checks before invalidating SEST entries.
27 07.08.99 Added Broadcast IP stuff and fixed a unicast timeout bug.
28 ***********************************************************************/
29 /* TODO:
30 R_T_TOV set to 15 msec in Loop topology. Needs to be 100 msec.
31 SMP testing.
32 Fix ADISC Tx before completing FLOGI.
33 */
35 static const char *version =
36 "iph5526.c:v1.0 07.08.99 Vineet Abraham (vma@iol.unh.edu)\n";
38 #include <linux/module.h>
39 #include <linux/config.h>
40 #include <linux/kernel.h>
41 #include <linux/sched.h>
42 #include <linux/errno.h>
43 #include <linux/pci.h>
44 #include <linux/init.h>
45 #include <linux/mm.h>
46 #include <linux/delay.h>
47 #include <linux/skbuff.h>
48 #include <linux/if_arp.h>
49 #include <linux/timer.h>
50 #include <linux/spinlock.h>
51 #include <asm/system.h>
52 #include <asm/io.h>
54 #include <linux/netdevice.h>
55 #include <linux/fcdevice.h> /* has the declarations for init_fcdev() among others; also includes if_fcdevice.h */
57 #include <linux/blk.h>
58 #include "../../scsi/sd.h"
59 #include "../../scsi/scsi.h"
60 #include "../../scsi/hosts.h"
61 #include "../../fc4/fcp.h"
63 /* driver specific header files */
64 #include "tach.h"
65 #include "tach_structs.h"
66 #include "iph5526_ip.h"
67 #include "iph5526_scsi.h"
68 #include "iph5526_novram.c"
70 #define RUN_AT(x) (jiffies + (x))
72 #define DEBUG_5526_0 0
73 #define DEBUG_5526_1 0
74 #define DEBUG_5526_2 0
76 #if DEBUG_5526_0
77 #define DPRINTK(format, a...) {printk("%s: ", fi->name); \
78 printk(format, ##a); \
79 printk("\n");}
80 #define ENTER(x) {printk("%s: ", fi->name); \
81 printk("iph5526.c : entering %s()\n", x);}
82 #define LEAVE(x) {printk("%s: ", fi->name); \
83 printk("iph5526.c : leaving %s()\n",x);}
85 #else
86 #define DPRINTK(format, a...) {}
87 #define ENTER(x) {}
88 #define LEAVE(x) {}
89 #endif
91 #if DEBUG_5526_1
92 #define DPRINTK1(format, a...) {printk("%s: ", fi->name); \
93 printk(format, ##a); \
94 printk("\n");}
95 #else
96 #define DPRINTK1(format, a...) {}
97 #endif
99 #if DEBUG_5526_2
100 #define DPRINTK2(format, a...) {printk("%s: ", fi->name); \
101 printk(format, ##a); \
102 printk("\n");}
103 #else
104 #define DPRINTK2(format, a...) {}
105 #endif
107 #define T_MSG(format, a...) {printk("%s: ", fi->name); \
108 printk(format, ##a);\
109 printk("\n");}
111 #define ALIGNED_SFS_ADDR(addr) ((((unsigned long)(addr) + (SFS_BUFFER_SIZE - 1)) & ~(SFS_BUFFER_SIZE - 1)) - (unsigned long)(addr))
112 #define ALIGNED_ADDR(addr, len) ((((unsigned long)(addr) + (len - 1)) & ~(len - 1)) - (unsigned long)(addr))
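/* Illustrative example (not in the original source): these macros compute
 * the number of bytes needed to advance 'addr' to the next boundary of
 * SFS_BUFFER_SIZE or 'len' bytes. Assuming, say, a 64-byte SFS_BUFFER_SIZE
 * and an addr ending in 0xc8, ((0xc8 + 63) & ~63) == 0x100, so the macro
 * yields 0x38, the offset that build_queues() later adds to the kmalloc()ed
 * pointer before the buffer address is handed to the Tachyon. */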
115 #define MAX_FC_CARDS 2
116 static struct fc_info *fc[MAX_FC_CARDS+1];
117 static unsigned int pci_irq_line = 0;
118 static struct {
119 unsigned short vendor_id;
120 unsigned short device_id;
121 char *name;
123 clone_list[] __initdata = {
124 {PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_5526, "Interphase Fibre Channel HBA"},
125 {PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_55x6, "Interphase Fibre Channel HBA"},
126 {0,}
129 static void tachyon_interrupt(int irq, void *dev_id, struct pt_regs *regs);
130 static void tachyon_interrupt_handler(int irq, void* dev_id, struct pt_regs* regs);
132 static int initialize_register_pointers(struct fc_info *fi);
133 void clean_up_memory(struct fc_info *fi);
135 static int tachyon_init(struct fc_info *fi);
136 static int build_queues(struct fc_info *fi);
137 static void build_tachyon_header(struct fc_info *fi, u_int my_id, u_int r_ctl, u_int d_id, u_int type, u_char seq_id, u_char df_ctl, u_short ox_id, u_short rx_id, char *data);
138 static int get_free_header(struct fc_info *fi);
139 static void build_EDB(struct fc_info *fi, char *data, u_short flags, u_short len);
140 static int get_free_EDB(struct fc_info *fi);
141 static void build_ODB(struct fc_info *fi, u_char seq_id, u_int d_id, u_int len, u_int cntl, u_short mtu, u_short ox_id, u_short rx_id, int NW_header, int int_required, u_int frame_class);
142 static void write_to_tachyon_registers(struct fc_info *fi);
143 static void reset_latch(struct fc_info *fi);
144 static void reset_tachyon(struct fc_info *fi, u_int value);
145 static void take_tachyon_offline(struct fc_info *fi);
146 static void read_novram(struct fc_info *fi);
147 static void reset_ichip(struct fc_info *fi);
148 static void update_OCQ_indx(struct fc_info *fi);
149 static void update_IMQ_indx(struct fc_info *fi, int count);
150 static void update_SFSBQ_indx(struct fc_info *fi);
151 static void update_MFSBQ_indx(struct fc_info *fi, int count);
152 static void update_tachyon_header_indx(struct fc_info *fi);
153 static void update_EDB_indx(struct fc_info *fi);
154 static void handle_FM_interrupt(struct fc_info *fi);
155 static void handle_MFS_interrupt(struct fc_info *fi);
156 static void handle_OOO_interrupt(struct fc_info *fi);
157 static void handle_SFS_interrupt(struct fc_info *fi);
158 static void handle_OCI_interrupt(struct fc_info *fi);
159 static void handle_SFS_BUF_WARN_interrupt(struct fc_info *fi);
160 static void handle_MFS_BUF_WARN_interrupt(struct fc_info *fi);
161 static void handle_IMQ_BUF_WARN_interrupt(struct fc_info *fi);
162 static void handle_Unknown_Frame_interrupt(struct fc_info *fi);
163 static void handle_Busied_Frame_interrupt(struct fc_info *fi);
164 static void handle_Bad_SCSI_Frame_interrupt(struct fc_info *fi);
165 static void handle_Inbound_SCSI_Status_interrupt(struct fc_info *fi);
166 static void handle_Inbound_SCSI_Command_interrupt(struct fc_info *fi);
167 static void completion_message_handler(struct fc_info *fi, u_int imq_int_type);
168 static void fill_login_frame(struct fc_info *fi, u_int logi);
170 static int tx_exchange(struct fc_info *fi, char *data, u_int len, u_int r_ctl, u_int type, u_int d_id, u_int mtu, int int_required, u_short ox_id, u_int frame_class);
171 static int tx_sequence(struct fc_info *fi, char *data, u_int len, u_int mtu, u_int d_id, u_short ox_id, u_short rx_id, u_char seq_id, int NW_flag, int int_required, u_int frame_class);
172 static int validate_login(struct fc_info *fi, u_int *base_ptr);
173 static void add_to_address_cache(struct fc_info *fi, u_int *base_ptr);
174 static void remove_from_address_cache(struct fc_info *fi, u_int *data, u_int cmnd_code);
175 static int node_logged_in_prev(struct fc_info *fi, u_int *buff_addr);
176 static int sid_logged_in(struct fc_info *fi, u_int s_id);
177 static struct fc_node_info *look_up_cache(struct fc_info *fi, char *data);
178 static int display_cache(struct fc_info *fi);
180 static void tx_logi(struct fc_info *fi, u_int logi, u_int d_id);
181 static void tx_logi_acc(struct fc_info *fi, u_int logi, u_int d_id, u_short received_ox_id);
182 static void tx_prli(struct fc_info *fi, u_int command_code, u_int d_id, u_short received_ox_id);
183 static void tx_logo(struct fc_info *fi, u_int d_id, u_short received_ox_id);
184 static void tx_adisc(struct fc_info *fi, u_int cmnd_code, u_int d_id, u_short received_ox_id);
185 static void tx_ls_rjt(struct fc_info *fi, u_int d_id, u_short received_ox_id, u_short reason_code, u_short expln_code);
186 static u_int plogi_ok(struct fc_info *fi, u_int *buff_addr, int size);
187 static void tx_acc(struct fc_info *fi, u_int d_id, u_short received_ox_id);
188 static void tx_name_server_req(struct fc_info *fi, u_int req);
189 static void rscn_handler(struct fc_info *fi, u_int node_id);
190 static void tx_scr(struct fc_info *fi);
191 static void scr_timer(unsigned long data);
192 static void explore_fabric(struct fc_info *fi, u_int *buff_addr);
193 static void perform_adisc(struct fc_info *fi);
194 static void local_port_discovery(struct fc_info *fi);
195 static void add_to_ox_id_list(struct fc_info *fi, u_int transaction_id, u_int cmnd_code);
196 static u_int remove_from_ox_id_list(struct fc_info *fi, u_short received_ox_id);
197 static void add_display_cache_timer(struct fc_info *fi);
199 /* Timers... */
200 static void nos_ols_timer(unsigned long data);
201 static void loop_timer(unsigned long data);
202 static void fabric_explore_timer(unsigned long data);
203 static void port_discovery_timer(unsigned long data);
204 static void display_cache_timer(unsigned long data);
206 /* SCSI Stuff */
207 static int add_to_sest(struct fc_info *fi, Scsi_Cmnd *Cmnd, struct fc_node_info *ni);
208 static struct fc_node_info *resolve_target(struct fc_info *fi, u_char target);
209 static void update_FCP_CMND_indx(struct fc_info *fi);
210 static int get_free_SDB(struct fc_info *fi);
211 static void update_SDB_indx(struct fc_info *fi);
212 static void mark_scsi_sid(struct fc_info *fi, u_int *buff_addr, u_char action);
213 static void invalidate_SEST_entry(struct fc_info *fi, u_short received_ox_id);
214 static int abort_exchange(struct fc_info *fi, u_short ox_id);
215 static void flush_tachyon_cache(struct fc_info *fi, u_short ox_id);
216 static int get_scsi_oxid(struct fc_info *fi);
217 static void update_scsi_oxid(struct fc_info *fi);
219 Scsi_Host_Template driver_template = IPH5526_SCSI_FC;
221 static void iph5526_timeout(struct net_device *dev);
223 #ifdef CONFIG_PCI
224 static int iph5526_probe_pci(struct net_device *dev);
225 #endif
228 int __init iph5526_probe(struct net_device *dev)
230 #ifdef CONFIG_PCI
231 if (pci_present() && (iph5526_probe_pci(dev) == 0))
232 return 0;
233 #endif
234 return -ENODEV;
237 #ifdef CONFIG_PCI
238 static int __init iph5526_probe_pci(struct net_device *dev)
240 #ifndef MODULE
241 struct fc_info *fi;
242 static int count = 0;
243 #endif
244 #ifdef MODULE
245 struct fc_info *fi = (struct fc_info *)dev->priv;
246 #endif
248 #ifndef MODULE
249 if(fc[count] != NULL) {
250 if (dev == NULL) {
251 dev = init_fcdev(NULL, 0);
252 if (dev == NULL)
253 return -ENOMEM;
255 fi = fc[count];
256 #endif
257 fi->dev = dev;
258 dev->base_addr = fi->base_addr;
259 dev->irq = fi->irq;
260 if (dev->priv == NULL)
261 dev->priv = fi;
262 fcdev_init(dev);
263 /* Assign our MAC address.
265 dev->dev_addr[0] = (fi->g.my_port_name_high & 0x0000FF00) >> 8;
266 dev->dev_addr[1] = fi->g.my_port_name_high;
267 dev->dev_addr[2] = (fi->g.my_port_name_low & 0xFF000000) >> 24;
268 dev->dev_addr[3] = (fi->g.my_port_name_low & 0x00FF0000) >> 16;
269 dev->dev_addr[4] = (fi->g.my_port_name_low & 0x0000FF00) >> 8;
270 dev->dev_addr[5] = fi->g.my_port_name_low;
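/* Illustrative example (hypothetical WWPN, not read from hardware): with
 * my_port_name_high == 0x100000E0 and my_port_name_low == 0x8B012345 the
 * assignments above yield dev_addr 00:E0:8B:01:23:45, i.e. the low six
 * bytes of the 8-byte Port_Name become the interface's "MAC" address. */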
271 #ifndef MODULE
272 count++;
274 else
275 return -ENODEV;
276 #endif
277 display_cache(fi);
278 return 0;
280 #endif /* CONFIG_PCI */
282 static int __init fcdev_init(struct net_device *dev)
284 dev->open = iph5526_open;
285 dev->stop = iph5526_close;
286 dev->hard_start_xmit = iph5526_send_packet;
287 dev->get_stats = iph5526_get_stats;
288 dev->set_multicast_list = NULL;
289 dev->change_mtu = iph5526_change_mtu;
290 dev->tx_timeout = iph5526_timeout;
291 dev->watchdog_timeo = 5*HZ;
292 #ifndef MODULE
293 fc_setup(dev);
294 #endif
295 return 0;
298 /* initialize tachyon and take it OnLine */
299 static int tachyon_init(struct fc_info *fi)
301 ENTER("tachyon_init");
302 if (build_queues(fi) == 0) {
303 T_MSG("build_queues() failed");
304 return 0;
307 /* Retrieve your port/node name.
309 read_novram(fi);
311 reset_ichip(fi);
313 reset_tachyon(fi, SOFTWARE_RESET);
315 LEAVE("tachyon_init");
316 return 1;
319 /* Build the 4 Qs - IMQ, OCQ, MFSBQ, SFSBQ */
320 /* Lots of dma_pages needed as Tachyon DMAs almost everything into
321 * host memory.
323 static int build_queues(struct fc_info *fi)
325 int i,j;
326 u_char *addr;
327 ENTER("build_queues");
328 /* Initializing Queue Variables.
330 fi->q.ptr_host_ocq_cons_indx = NULL;
331 fi->q.ptr_host_hpcq_cons_indx = NULL;
332 fi->q.ptr_host_imq_prod_indx = NULL;
334 fi->q.ptr_ocq_base = NULL;
335 fi->q.ocq_len = 0;
336 fi->q.ocq_end = 0;
337 fi->q.ocq_prod_indx = 0;
339 fi->q.ptr_imq_base = NULL;
340 fi->q.imq_len = 0;
341 fi->q.imq_end = 0;
342 fi->q.imq_cons_indx = 0;
343 fi->q.imq_prod_indx = 0;
345 fi->q.ptr_mfsbq_base = NULL;
346 fi->q.mfsbq_len = 0;
347 fi->q.mfsbq_end = 0;
348 fi->q.mfsbq_prod_indx = 0;
349 fi->q.mfsbq_cons_indx = 0;
350 fi->q.mfsbuff_len = 0;
351 fi->q.mfsbuff_end = 0;
352 fi->g.mfs_buffer_count = 0;
354 fi->q.ptr_sfsbq_base = NULL;
355 fi->q.sfsbq_len = 0;
356 fi->q.sfsbq_end = 0;
357 fi->q.sfsbq_prod_indx = 0;
358 fi->q.sfsbq_cons_indx = 0;
359 fi->q.sfsbuff_len = 0;
360 fi->q.sfsbuff_end = 0;
362 fi->q.sdb_indx = 0;
363 fi->q.fcp_cmnd_indx = 0;
365 fi->q.ptr_edb_base = NULL;
366 fi->q.edb_buffer_indx = 0;
367 fi->q.ptr_tachyon_header_base = NULL;
368 fi->q.tachyon_header_indx = 0;
369 fi->node_info_list = NULL;
370 fi->ox_id_list = NULL;
371 fi->g.loop_up = FALSE;
372 fi->g.ptp_up = FALSE;
373 fi->g.link_up = FALSE;
374 fi->g.fabric_present = FALSE;
375 fi->g.n_port_try = FALSE;
376 fi->g.dont_init = FALSE;
377 fi->g.nport_timer_set = FALSE;
378 fi->g.lport_timer_set = FALSE;
379 fi->g.no_of_targets = 0;
380 fi->g.sem = 0;
381 fi->g.perform_adisc = FALSE;
382 fi->g.e_i = 0;
384 /* build OCQ */
385 if ( (fi->q.ptr_ocq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
386 T_MSG("failed to get OCQ page");
387 return 0;
389 /* set up the OCQ structures */
390 for (i = 0; i < OCQ_LENGTH; i++)
391 fi->q.ptr_odb[i] = fi->q.ptr_ocq_base + NO_OF_ENTRIES*i;
393 /* build IMQ */
394 if ( (fi->q.ptr_imq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
395 T_MSG("failed to get IMQ page");
396 return 0;
398 for (i = 0; i < IMQ_LENGTH; i++)
399 fi->q.ptr_imqe[i] = fi->q.ptr_imq_base + NO_OF_ENTRIES*i;
401 /* build MFSBQ */
402 if ( (fi->q.ptr_mfsbq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
403 T_MSG("failed to get MFSBQ page");
404 return 0;
406 memset((char *)fi->q.ptr_mfsbq_base, 0, MFSBQ_LENGTH * 32);
407 /* Allocate one huge chunk of memory... helps while reassembling
408 * frames.
410 if ( (addr = (u_char *)__get_free_pages(GFP_KERNEL, 5) ) == 0) {
411 T_MSG("failed to get MFSBQ page");
412 return 0;
414 /* fill in addresses of empty buffers */
415 for (i = 0; i < MFSBQ_LENGTH; i++) {
416 for (j = 0; j < NO_OF_ENTRIES; j++) {
417 *(fi->q.ptr_mfsbq_base + i*NO_OF_ENTRIES + j) = htonl(virt_to_bus(addr));
418 addr += MFS_BUFFER_SIZE;
422 /* The number of entries in each MFS buffer is 8. There are 8
423 * MFS buffers. That leaves us with 4096-256 bytes. We use them
424 * as temporary space for ELS frames. This is done to make sure that
425 * the addresses are aligned.
427 fi->g.els_buffer[0] = fi->q.ptr_mfsbq_base + MFSBQ_LENGTH*NO_OF_ENTRIES;
428 for (i = 1; i < MAX_PENDING_FRAMES; i++)
429 fi->g.els_buffer[i] = fi->g.els_buffer[i-1] + 64;
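/* Worked out under the assumption MFSBQ_LENGTH == 8 and NO_OF_ENTRIES == 8
 * (as the comment above implies): the ring pointers occupy 8 * 8 * 4 == 256
 * bytes of the 4096-byte page, and the remaining 3840 bytes are carved into
 * ELS scratch buffers spaced 64 u_ints apart. Illustrative arithmetic only. */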
431 /* build SFSBQ */
432 if ( (fi->q.ptr_sfsbq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
433 T_MSG("failed to get SFSBQ page");
434 return 0;
436 memset((char *)fi->q.ptr_sfsbq_base, 0, SFSBQ_LENGTH * 32);
437 /* fill in addresses of empty buffers */
438 for (i = 0; i < SFSBQ_LENGTH; i++)
439 for (j = 0; j < NO_OF_ENTRIES; j++){
440 addr = kmalloc(SFS_BUFFER_SIZE*2, GFP_KERNEL);
441 if (addr == NULL){
442 T_MSG("ptr_sfs_buffer : memory not allocated");
443 return 0;
445 else {
446 int offset = ALIGNED_SFS_ADDR(addr);
447 memset((char *)addr, 0, SFS_BUFFER_SIZE);
448 fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES +j] = (u_int *)addr;
449 addr += offset;
450 *(fi->q.ptr_sfsbq_base + i*NO_OF_ENTRIES + j) = htonl(virt_to_bus(addr));
454 /* The number of entries in each SFS buffer is 8. There are 8
455 * SFS buffers. That leaves us with 4096-256 bytes. We use them
456 * as temporary space for ARP frames. This is done in order to
457 * support HW_Types of 0x1 and 0x6.
459 fi->g.arp_buffer = (char *)fi->q.ptr_sfsbq_base + SFSBQ_LENGTH*NO_OF_ENTRIES*4;
461 /* build EDB */
462 if ((fi->q.ptr_edb_base = (u_int *)__get_free_pages(GFP_KERNEL, 5) ) == 0) {
463 T_MSG("failed to get EDB page");
464 return 0;
466 for (i = 0; i < EDB_LEN; i++)
467 fi->q.ptr_edb[i] = fi->q.ptr_edb_base + 2*i;
469 /* build SEST */
471 /* OX_IDs range from 0x0 - 0x4FFF.
473 if ((fi->q.ptr_sest_base = (u_int *)__get_free_pages(GFP_KERNEL, 5)) == 0) {
474 T_MSG("failed to get SEST page");
475 return 0;
477 for (i = 0; i < SEST_LENGTH; i++)
478 fi->q.ptr_sest[i] = fi->q.ptr_sest_base + NO_OF_ENTRIES*i;
480 if ((fi->q.ptr_sdb_base = (u_int *)__get_free_pages(GFP_KERNEL, 5)) == 0) {
481 T_MSG("failed to get SDB page");
482 return 0;
484 for (i = 0 ; i < NO_OF_SDB_ENTRIES; i++)
485 fi->q.ptr_sdb_slot[i] = fi->q.ptr_sdb_base + (SDB_SIZE/4)*i;
487 if ((fi->q.ptr_fcp_cmnd_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
488 T_MSG("failed to get FCP_CMND page");
489 return 0;
491 for (i = 0; i < NO_OF_FCP_CMNDS; i++)
492 fi->q.ptr_fcp_cmnd[i] = fi->q.ptr_fcp_cmnd_base + NO_OF_ENTRIES*i;
494 /* Allocate space for Tachyon Header as well...
496 if ((fi->q.ptr_tachyon_header_base = (u_int *)__get_free_pages(GFP_KERNEL, 0) ) == 0) {
497 T_MSG("failed to get tachyon_header page");
498 return 0;
500 for (i = 0; i < NO_OF_TACH_HEADERS; i++)
501 fi->q.ptr_tachyon_header[i] = fi->q.ptr_tachyon_header_base + 16*i;
503 /* Allocate memory for indices.
504 * Indices should be aligned on 32 byte boundaries.
506 fi->q.host_ocq_cons_indx = kmalloc(2*32, GFP_KERNEL);
507 if (fi->q.host_ocq_cons_indx == NULL){
508 T_MSG("fi->q.host_ocq_cons_indx : memory not allocated");
509 return 0;
511 fi->q.ptr_host_ocq_cons_indx = fi->q.host_ocq_cons_indx;
512 if ((u_long)(fi->q.host_ocq_cons_indx) % 32)
513 fi->q.host_ocq_cons_indx++;
515 fi->q.host_hpcq_cons_indx = kmalloc(2*32, GFP_KERNEL);
516 if (fi->q.host_hpcq_cons_indx == NULL){
517 T_MSG("fi->q.host_hpcq_cons_indx : memory not allocated");
518 return 0;
520 fi->q.ptr_host_hpcq_cons_indx= fi->q.host_hpcq_cons_indx;
521 if ((u_long)(fi->q.host_hpcq_cons_indx) % 32)
522 fi->q.host_hpcq_cons_indx++;
524 fi->q.host_imq_prod_indx = kmalloc(2*32, GFP_KERNEL);
525 if (fi->q.host_imq_prod_indx == NULL){
526 T_MSG("fi->q.host_imq_prod_indx : memory not allocated");
527 return 0;
529 fi->q.ptr_host_imq_prod_indx = fi->q.host_imq_prod_indx;
530 if ((u_long)(fi->q.host_imq_prod_indx) % 32)
531 fi->q.host_imq_prod_indx++;
533 LEAVE("build_queues");
534 return 1;
538 static void write_to_tachyon_registers(struct fc_info *fi)
540 u_int bus_addr, bus_indx_addr, i;
542 ENTER("write_to_tachyon_registers");
544 /* Clear Queues each time Tachyon is reset */
545 memset((char *)fi->q.ptr_ocq_base, 0, OCQ_LENGTH * 32);
546 memset((char *)fi->q.ptr_imq_base, 0, IMQ_LENGTH * 32);
547 memset((char *)fi->q.ptr_edb_base, 0, EDB_LEN * 8);
548 memset((char *)fi->q.ptr_sest_base, 0, SEST_LENGTH * 32);
549 memset((char *)fi->q.ptr_sdb_base, 0, NO_OF_SDB_ENTRIES * SDB_SIZE);
550 memset((char *)fi->q.ptr_tachyon_header_base, 0xFF, NO_OF_TACH_HEADERS * TACH_HEADER_SIZE);
551 for (i = 0; i < SEST_LENGTH; i++)
552 fi->q.free_scsi_oxid[i] = OXID_AVAILABLE;
553 for (i = 0; i < NO_OF_SDB_ENTRIES; i++)
554 fi->q.sdb_slot_status[i] = SDB_FREE;
556 take_tachyon_offline(fi);
557 writel(readl(fi->t_r.ptr_tach_config_reg) | SCSI_ENABLE | WRITE_STREAM_SIZE | READ_STREAM_SIZE | PARITY_EVEN | OOO_REASSEMBLY_DISABLE, fi->t_r.ptr_tach_config_reg);
559 /* Write OCQ registers */
560 fi->q.ocq_prod_indx = 0;
561 *(fi->q.host_ocq_cons_indx) = 0;
563 /* The Tachyon needs to be passed the "real" address */
564 bus_addr = virt_to_bus(fi->q.ptr_ocq_base);
565 writel(bus_addr, fi->t_r.ptr_ocq_base_reg);
566 writel(OCQ_LENGTH - 1, fi->t_r. ptr_ocq_len_reg);
567 bus_indx_addr = virt_to_bus(fi->q.host_ocq_cons_indx);
568 writel(bus_indx_addr, fi->t_r.ptr_ocq_cons_indx_reg);
570 /* Write IMQ registers */
571 fi->q.imq_cons_indx = 0;
572 *(fi->q.host_imq_prod_indx) = 0;
573 bus_addr = virt_to_bus(fi->q.ptr_imq_base);
574 writel(bus_addr, fi->t_r.ptr_imq_base_reg);
575 writel(IMQ_LENGTH - 1, fi->t_r.ptr_imq_len_reg);
576 bus_indx_addr = virt_to_bus(fi->q.host_imq_prod_indx);
577 writel(bus_indx_addr, fi->t_r.ptr_imq_prod_indx_reg);
579 /* Write MFSBQ registers */
580 fi->q.mfsbq_prod_indx = MFSBQ_LENGTH - 1;
581 fi->q.mfsbuff_end = MFS_BUFFER_SIZE - 1;
582 fi->q.mfsbq_cons_indx = 0;
583 bus_addr = virt_to_bus(fi->q.ptr_mfsbq_base);
584 writel(bus_addr, fi->t_r.ptr_mfsbq_base_reg);
585 writel(MFSBQ_LENGTH - 1, fi->t_r.ptr_mfsbq_len_reg);
586 writel(fi->q.mfsbuff_end, fi->t_r.ptr_mfsbuff_len_reg);
587 /* Do this last as tachyon will prefetch the
588 * first entry as soon as we write to it.
590 writel(fi->q.mfsbq_prod_indx, fi->t_r.ptr_mfsbq_prod_reg);
592 /* Write SFSBQ registers */
593 fi->q.sfsbq_prod_indx = SFSBQ_LENGTH - 1;
594 fi->q.sfsbuff_end = SFS_BUFFER_SIZE - 1;
595 fi->q.sfsbq_cons_indx = 0;
596 bus_addr = virt_to_bus(fi->q.ptr_sfsbq_base);
597 writel(bus_addr, fi->t_r.ptr_sfsbq_base_reg);
598 writel(SFSBQ_LENGTH - 1, fi->t_r.ptr_sfsbq_len_reg);
599 writel(fi->q.sfsbuff_end, fi->t_r.ptr_sfsbuff_len_reg);
600 /* Do this last as tachyon will prefetch the first
601 * entry as soon as we write to it.
603 writel(fi->q.sfsbq_prod_indx, fi->t_r.ptr_sfsbq_prod_reg);
605 /* Write SEST registers */
606 bus_addr = virt_to_bus(fi->q.ptr_sest_base);
607 writel(bus_addr, fi->t_r.ptr_sest_base_reg);
608 writel(SEST_LENGTH - 1, fi->t_r.ptr_sest_len_reg);
609 /* the last 2 bits _should_ be 1 */
610 writel(SEST_BUFFER_SIZE - 1, fi->t_r.ptr_scsibuff_len_reg);
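/* For instance (illustrative value only), a SEST_BUFFER_SIZE of 1024 would
 * make the value written here 0x3FF, which does have its two low bits set;
 * the real size comes from tach.h. */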
612 /* write AL_TIME & E_D_TOV into the registers */
613 writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
614 /* Tell Tachyon to pick a Soft Assigned AL_PA */
615 writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);
617 /* Read the WWN from EEPROM. But for now we assign it here. */
618 writel(WORLD_WIDE_NAME_LOW, fi->t_r.ptr_fm_wwn_low_reg);
619 writel(WORLD_WIDE_NAME_HIGH, fi->t_r.ptr_fm_wwn_hi_reg);
621 DPRINTK1("TACHYON initializing as L_Port...\n");
622 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
624 LEAVE("write_to_tachyon_registers");
628 static void tachyon_interrupt(int irq, void* dev_id, struct pt_regs* regs)
630 struct Scsi_Host *host = dev_id;
631 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
632 struct fc_info *fi = hostdata->fi;
633 u_long flags;
634 spin_lock_irqsave(&fi->fc_lock, flags);
635 tachyon_interrupt_handler(irq, dev_id, regs);
636 spin_unlock_irqrestore(&fi->fc_lock, flags);
639 static void tachyon_interrupt_handler(int irq, void* dev_id, struct pt_regs* regs)
641 struct Scsi_Host *host = dev_id;
642 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
643 struct fc_info *fi = hostdata->fi;
644 u_int *ptr_imq_entry;
645 u_int imq_int_type, current_IMQ_index = 0, prev_IMQ_index;
646 int index, no_of_entries = 0;
648 DPRINTK("\n");
649 ENTER("tachyon_interrupt");
650 if (fi->q.host_imq_prod_indx != NULL) {
651 current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx));
653 else {
654 /* _Should not_ happen */
655 T_MSG("IMQ_indx NULL. DISABLING INTERRUPTS!!!\n");
656 writel(0x0, fi->i_r.ptr_ichip_hw_control_reg);
659 if (current_IMQ_index > fi->q.imq_cons_indx)
660 no_of_entries = current_IMQ_index - fi->q.imq_cons_indx;
661 else
662 if (current_IMQ_index < fi->q.imq_cons_indx)
663 no_of_entries = IMQ_LENGTH - (fi->q.imq_cons_indx - current_IMQ_index);
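/* Example of the wrap handling above (IMQ_LENGTH assumed to be 64 purely
 * for illustration): with imq_cons_indx == 60 and a producer index of 3
 * the queue has wrapped, so no_of_entries == 64 - (60 - 3) == 7. */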
665 if (no_of_entries == 0) {
666 u_int ichip_status;
667 ichip_status = readl(fi->i_r.ptr_ichip_hw_status_reg);
668 if (ichip_status & 0x20) {
669 /* Should _never_ happen. Might require a hard reset */
670 T_MSG("Too bad... PCI Bus Error. Resetting (i)chip");
671 reset_ichip(fi);
672 T_MSG("DISABLING INTERRUPTS!!!\n");
673 writel(0x0, fi->i_r.ptr_ichip_hw_control_reg);
677 prev_IMQ_index = current_IMQ_index;
678 for (index = 0; index < no_of_entries; index++) {
679 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
680 imq_int_type = ntohl(*ptr_imq_entry);
682 completion_message_handler(fi, imq_int_type);
683 if ((fi->g.link_up == FALSE) && ((imq_int_type == MFS_BUF_WARN) || (imq_int_type == SFS_BUF_WARN) || (imq_int_type == IMQ_BUF_WARN)))
684 break;
685 update_IMQ_indx(fi, 1);
687 /* Check for more entries */
688 current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx));
689 if (current_IMQ_index != prev_IMQ_index) {
690 no_of_entries++;
691 prev_IMQ_index = current_IMQ_index;
693 } /*end of for loop*/
694 return;
695 LEAVE("tachyon_interrupt");
699 static void handle_SFS_BUF_WARN_interrupt(struct fc_info *fi)
701 int i;
702 ENTER("handle_SFS_BUF_WARN_interrupt");
703 if (fi->g.link_up == FALSE) {
704 reset_tachyon(fi, SOFTWARE_RESET);
705 return;
707 /* Free up all but one entry in the Q.
709 for (i = 0; i < ((SFSBQ_LENGTH - 1) * NO_OF_ENTRIES); i++) {
710 handle_SFS_interrupt(fi);
711 update_IMQ_indx(fi, 1);
713 LEAVE("handle_SFS_BUF_WARN_interrupt");
716 /* Untested_Code_Begin */
717 static void handle_MFS_BUF_WARN_interrupt(struct fc_info *fi)
719 int i;
720 ENTER("handle_MFS_BUF_WARN_interrupt");
721 if (fi->g.link_up == FALSE) {
722 reset_tachyon(fi, SOFTWARE_RESET);
723 return;
725 /* FIXME: freeing up 8 entries.
727 for (i = 0; i < NO_OF_ENTRIES; i++) {
728 handle_MFS_interrupt(fi);
729 update_IMQ_indx(fi, 1);
731 LEAVE("handle_MFS_BUF_WARN_interrupt");
733 /*Untested_Code_End */
735 static void handle_IMQ_BUF_WARN_interrupt(struct fc_info *fi)
737 u_int *ptr_imq_entry;
738 u_int imq_int_type, current_IMQ_index = 0, temp_imq_cons_indx;
739 int index, no_of_entries = 0;
741 ENTER("handle_IMQ_BUF_WARN_interrupt");
742 if (fi->g.link_up == FALSE) {
743 reset_tachyon(fi, SOFTWARE_RESET);
744 return;
746 current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx));
748 if (current_IMQ_index > fi->q.imq_cons_indx)
749 no_of_entries = current_IMQ_index - fi->q.imq_cons_indx;
750 else
751 if (current_IMQ_index < fi->q.imq_cons_indx)
752 no_of_entries = IMQ_LENGTH - (fi->q.imq_cons_indx - current_IMQ_index);
753 /* We don't want to look at the same IMQ entry again.
755 temp_imq_cons_indx = fi->q.imq_cons_indx + 1;
756 if (no_of_entries != 0)
757 no_of_entries -= 1;
758 for (index = 0; index < no_of_entries; index++) {
759 ptr_imq_entry = fi->q.ptr_imqe[temp_imq_cons_indx];
760 imq_int_type = ntohl(*ptr_imq_entry);
761 if (imq_int_type != IMQ_BUF_WARN)
762 completion_message_handler(fi, imq_int_type);
763 temp_imq_cons_indx++;
764 if (temp_imq_cons_indx == IMQ_LENGTH)
765 temp_imq_cons_indx = 0;
766 } /*end of for loop*/
767 if (no_of_entries != 0)
768 update_IMQ_indx(fi, no_of_entries);
769 LEAVE("handle_IMQ_BUF_WARN_interrupt");
772 static void completion_message_handler(struct fc_info *fi, u_int imq_int_type)
774 switch(imq_int_type) {
775 case OUTBOUND_COMPLETION:
776 DPRINTK("OUTBOUND_COMPLETION message received");
777 break;
778 case OUTBOUND_COMPLETION_I:
779 DPRINTK("OUTBOUND_COMPLETION_I message received");
780 handle_OCI_interrupt(fi);
781 break;
782 case OUT_HI_PRI_COMPLETION:
783 DPRINTK("OUT_HI_PRI_COMPLETION message received");
784 break;
785 case OUT_HI_PRI_COMPLETION_I:
786 DPRINTK("OUT_HI_PRI_COMPLETION_I message received");
787 break;
788 case INBOUND_MFS_COMPLETION:
789 DPRINTK("INBOUND_MFS_COMPLETION message received");
790 handle_MFS_interrupt(fi);
791 break;
792 case INBOUND_OOO_COMPLETION:
793 DPRINTK("INBOUND_OOO_COMPLETION message received");
794 handle_OOO_interrupt(fi);
795 break;
796 case INBOUND_SFS_COMPLETION:
797 DPRINTK("INBOUND_SFS_COMPLETION message received");
798 handle_SFS_interrupt(fi);
799 break;
800 case INBOUND_UNKNOWN_FRAME_I:
801 DPRINTK("INBOUND_UNKNOWN_FRAME message received");
802 handle_Unknown_Frame_interrupt(fi);
803 break;
804 case INBOUND_BUSIED_FRAME:
805 DPRINTK("INBOUND_BUSIED_FRAME message received");
806 handle_Busied_Frame_interrupt(fi);
807 break;
808 case FRAME_MGR_INTERRUPT:
809 DPRINTK("FRAME_MGR_INTERRUPT message received");
810 handle_FM_interrupt(fi);
811 break;
812 case READ_STATUS:
813 DPRINTK("READ_STATUS message received");
814 break;
815 case SFS_BUF_WARN:
816 DPRINTK("SFS_BUF_WARN message received");
817 handle_SFS_BUF_WARN_interrupt(fi);
818 break;
819 case MFS_BUF_WARN:
820 DPRINTK("MFS_BUF_WARN message received");
821 handle_MFS_BUF_WARN_interrupt(fi);
822 break;
823 case IMQ_BUF_WARN:
824 DPRINTK("IMQ_BUF_WARN message received");
825 handle_IMQ_BUF_WARN_interrupt(fi);
826 break;
827 case INBOUND_C1_TIMEOUT:
828 DPRINTK("INBOUND_C1_TIMEOUT message received");
829 break;
830 case BAD_SCSI_FRAME:
831 DPRINTK("BAD_SCSI_FRAME message received");
832 handle_Bad_SCSI_Frame_interrupt(fi);
833 break;
834 case INB_SCSI_STATUS_COMPLETION:
835 DPRINTK("INB_SCSI_STATUS_COMPL message received");
836 handle_Inbound_SCSI_Status_interrupt(fi);
837 break;
838 case INBOUND_SCSI_COMMAND:
839 DPRINTK("INBOUND_SCSI_COMMAND message received");
840 handle_Inbound_SCSI_Command_interrupt(fi);
841 break;
842 case INBOUND_SCSI_DATA_COMPLETION:
843 DPRINTK("INBOUND_SCSI_DATA message received");
844 /* Only for targets */
845 break;
846 default:
847 T_MSG("DEFAULT message received, type = %x", imq_int_type);
848 return;
850 reset_latch(fi);
853 static void handle_OCI_interrupt(struct fc_info *fi)
855 u_int *ptr_imq_entry;
856 u_long transaction_id = 0;
857 unsigned short status, seq_count, transmitted_ox_id;
858 struct Scsi_Host *host = fi->host;
859 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
860 Scsi_Cmnd *Cmnd;
861 u_int tag;
863 ENTER("handle_OCI_interrupt");
864 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
865 transaction_id = ntohl(*(ptr_imq_entry + 1));
866 status = ntohl(*(ptr_imq_entry + 2)) >> 16;
867 seq_count = ntohl(*(ptr_imq_entry + 3));
868 DPRINTK("transaction_id= %x", (u_int)transaction_id);
869 tag = transaction_id & 0xFFFF0000;
870 transmitted_ox_id = transaction_id;
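/* Decoding example (illustrative values): a transaction_id of 0x00030012
 * splits into tag == 0x00030000, which is used below either as an ELS
 * command code or, shifted down, as a missing target id of 3, and into
 * transmitted_ox_id == 0x0012, the OX_ID the frame was sent with. */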
872 /* The INT could be due to either TIME_OUT or BAD_ALPA,
873 * but we check only for TimeOuts. A Bad AL_PA will be
874 * caught by the FM interrupt handler.
877 if ((status == OCM_TIMEOUT_OR_BAD_ALPA) && (!fi->g.port_discovery) && (!fi->g.perform_adisc)){
878 DPRINTK("Frame TimeOut on OX_ID = %x", (u_int)transaction_id);
880 /* Is it a SCSI frame that is timing out? Not a very good check...
882 if ((transmitted_ox_id <= MAX_SCSI_OXID) && ((tag == FC_SCSI_BAD_TARGET) || (tag < 0x00FF0000))) {
883 /* If it is a Bad AL_PA, we report it as BAD_TARGET.
884 * Else, we allow the command to time-out. A Link
885 * re-initialization could be taking place.
887 if (tag == FC_SCSI_BAD_TARGET) {
888 Cmnd = hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID];
889 hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID] = NULL;
890 if (Cmnd != NULL) {
891 Cmnd->result = DID_BAD_TARGET << 16;
892 (*Cmnd->scsi_done) (Cmnd);
894 else
895 T_MSG("NULL Command out of handler!");
896 } /* if Bad Target */
897 else {
898 u_char missing_target = tag >> 16;
899 struct fc_node_info *q = fi->node_info_list;
900 /* A Node that we thought was logged in has gone
901 * away. We are the optimistic kind and we keep
902 * hoping that our dear little Target will come back
903 * to us. For now we log him out.
905 DPRINTK2("Missing Target = %d", missing_target);
906 while (q != NULL) {
907 if (q->target_id == missing_target) {
908 T_MSG("Target %d Logged out", q->target_id);
909 q->login = LOGIN_ATTEMPTED;
910 if (fi->num_nodes > 0)
911 fi->num_nodes--;
912 tx_logi(fi, ELS_PLOGI, q->d_id);
913 break;
915 else
916 q = q->next;
919 } /* End of SCSI frame timing out. */
920 else {
921 if (seq_count > 1) {
922 /* An IP frame was transmitted to a Bad AL_PA. Free up
923 * the skb used.
925 dev_kfree_skb_irq((struct sk_buff *)(bus_to_virt(transaction_id)));
926 netif_wake_queue(fi->dev);
928 } /* End of IP frame timing out. */
929 } /* End of frame timing out. */
930 else {
931 /* Frame was transmitted successfully. Check if it was an ELS
932 * frame or an IP frame or a Bad_Target_Notification frame (in
933 * case of a ptp_link). Ugly!
935 if ((status == 0) && (seq_count == 0)) {
936 u_int tag = transaction_id & 0xFFFF0000;
937 /* Continue with port discovery after an ELS is successfully
938 * transmitted. (status == 0).
940 DPRINTK("tag = %x", tag);
941 switch(tag) {
942 case ELS_FLOGI:
943 /* Let's use the Name Server instead */
944 fi->g.explore_fabric = TRUE;
945 fi->g.port_discovery = FALSE;
946 fi->g.alpa_list_index = MAX_NODES;
947 add_to_ox_id_list(fi, transaction_id, tag);
948 break;
949 case ELS_PLOGI:
950 if (fi->g.fabric_present && (fi->g.name_server == FALSE))
951 add_to_ox_id_list(fi,transaction_id,ELS_NS_PLOGI);
952 else
953 add_to_ox_id_list(fi, transaction_id, tag);
954 break;
955 case FC_SCSI_BAD_TARGET:
956 Cmnd = hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID];
957 hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID] = NULL;
958 if (Cmnd != NULL) {
959 Cmnd->result = DID_BAD_TARGET << 16;
960 (*Cmnd->scsi_done) (Cmnd);
962 else
963 T_MSG("NULL Command out of handler!");
964 break;
965 default:
966 add_to_ox_id_list(fi, transaction_id, tag);
969 if (fi->g.alpa_list_index >= MAX_NODES) {
970 if (fi->g.port_discovery == TRUE) {
971 fi->g.port_discovery = FALSE;
972 add_display_cache_timer(fi);
974 fi->g.alpa_list_index = MAX_NODES;
976 if (fi->g.port_discovery == TRUE)
977 local_port_discovery(fi);
979 else {
980 /* An IP frame has been successfully transmitted.
981 * Free the skb that was used for this IP frame.
983 if ((status == 0) && (seq_count > 1)) {
984 dev_kfree_skb_irq((struct sk_buff *)(bus_to_virt(transaction_id)));
985 netif_wake_queue(fi->dev);
989 LEAVE("handle_OCI_interrupt");
992 /* Right now we discard OOO frames */
993 static void handle_OOO_interrupt(struct fc_info *fi)
995 u_int *ptr_imq_entry;
996 int queue_indx, offset, payload_size;
997 int no_of_buffers = 1; /* header is in a separate buffer */
998 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
999 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1000 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1001 queue_indx = queue_indx >> 16;
1002 payload_size = ntohl(*(ptr_imq_entry + 2)) - TACHYON_HEADER_LEN;
1003 /* Calculate total number of buffers */
1004 no_of_buffers += payload_size / MFS_BUFFER_SIZE;
1005 if (payload_size % MFS_BUFFER_SIZE)
1006 no_of_buffers++;
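/* Illustrative decode of the IMQ words above: if word 1 were 0x00050003,
 * queue_indx would be 5 and offset 3. Assuming an MFS_BUFFER_SIZE of 1024,
 * a payload_size of 2100 gives 1 (header) + 2100/1024 == 2 plus one more
 * for the remainder, i.e. 4 buffers handed back to the MFSBQ below. */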
1008 /* provide Tachyon with another set of buffers */
1009 fi->g.mfs_buffer_count += no_of_buffers;
1010 if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
1011 int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
1012 fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
1013 update_MFSBQ_indx(fi, count);
1017 static void handle_MFS_interrupt(struct fc_info *fi)
1019 u_int *ptr_imq_entry, *buff_addr;
1020 u_int type_of_frame, s_id;
1021 int queue_indx, offset, payload_size, starting_indx, starting_offset;
1022 u_short received_ox_id;
1023 int no_of_buffers = 1; /* header is in a separate buffer */
1024 struct sk_buff *skb;
1025 int wrap_around = FALSE, no_of_wrap_buffs = NO_OF_ENTRIES - 1;
1026 ENTER("handle_MFS_interrupt");
1027 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1028 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1029 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1030 queue_indx = queue_indx >> 16;
1031 DPRINTK("queue_indx = %d, offset = %d\n", queue_indx, offset);
1032 payload_size = ntohl(*(ptr_imq_entry + 2)) - TACHYON_HEADER_LEN;
1033 DPRINTK("payload_size = %d", payload_size);
1034 /* Calculate total number of buffers */
1035 no_of_buffers += payload_size / MFS_BUFFER_SIZE;
1036 if (payload_size % MFS_BUFFER_SIZE)
1037 no_of_buffers++;
1038 DPRINTK("no_of_buffers = %d", no_of_buffers);
1040 if ((no_of_buffers - 1) <= offset) {
1041 starting_offset = offset - (no_of_buffers - 1);
1042 starting_indx = queue_indx;
1044 else {
1045 int temp = no_of_buffers - (offset + 1);
1046 int no_of_queues = temp / NO_OF_ENTRIES;
1047 starting_offset = temp % NO_OF_ENTRIES;
1048 if (starting_offset != 0) {
1049 no_of_wrap_buffs = starting_offset - 1; /* exclude header */
1050 starting_offset = NO_OF_ENTRIES - starting_offset;
1051 no_of_queues++;
1053 starting_indx = queue_indx - no_of_queues;
1054 if (starting_indx < 0) {
1055 no_of_wrap_buffs -= (starting_indx + 1) * NO_OF_ENTRIES;
1056 starting_indx = MFSBQ_LENGTH + starting_indx;
1057 wrap_around = TRUE;
1061 DPRINTK("starting_indx = %d, starting offset = %d no_of_wrap_buffs = %d\n", starting_indx, starting_offset, no_of_wrap_buffs);
1062 /* Get Tachyon Header from first buffer */
1063 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base + starting_indx*NO_OF_ENTRIES + starting_offset)));
1066 /* extract Type of Frame */
1067 type_of_frame = (u_int)ntohl(*(buff_addr + 4)) & 0xFF000000;
1068 s_id = (u_int)ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1069 received_ox_id = ntohl(*(buff_addr + 6)) >> 16;
1070 buff_addr += MFS_BUFFER_SIZE/4;
1071 DPRINTK("type_of_frame = %x, s_id = %x, ox_id = %x", type_of_frame, s_id, received_ox_id);
1073 switch(type_of_frame) {
1074 case TYPE_LLC_SNAP:
1075 skb = dev_alloc_skb(payload_size);
1076 if (skb == NULL) {
1077 printk(KERN_NOTICE "%s: In handle_MFS_interrupt() Memory squeeze, dropping packet.\n", fi->name);
1078 fi->fc_stats.rx_dropped++;
1079 fi->g.mfs_buffer_count += no_of_buffers;
1080 if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
1081 int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
1082 fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
1083 update_MFSBQ_indx(fi, count);
1084 return;
1087 if (wrap_around) {
1088 int wrap_size = no_of_wrap_buffs * MFS_BUFFER_SIZE;
1089 int tail_size = payload_size - wrap_size;
1090 DPRINTK("wrap_size = %d, tail_size = %d\n", wrap_size, tail_size);
1091 if (no_of_wrap_buffs)
1092 memcpy(skb_put(skb, wrap_size), buff_addr, wrap_size);
1093 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base)));
1094 memcpy(skb_put(skb, tail_size), buff_addr, tail_size);
1096 else
1097 memcpy(skb_put(skb, payload_size), buff_addr, payload_size);
1098 rx_net_mfs_packet(fi, skb);
1099 break;
1100 default:
1101 T_MSG("Unknown Frame Type received. Type = %x", type_of_frame);
1104 /* provide Tachyon with another set of buffers */
1105 fi->g.mfs_buffer_count += no_of_buffers;
1106 if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
1107 int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
1108 fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
1109 update_MFSBQ_indx(fi, count);
1111 LEAVE("handle_MFS_interrupt");
1114 static void handle_Unknown_Frame_interrupt(struct fc_info *fi)
1116 u_int *ptr_imq_entry;
1117 int queue_indx, offset;
1118 ENTER("handle_Unknown_Frame_interrupt");
1119 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1120 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1121 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1122 queue_indx = queue_indx >> 16;
1123 /* We discard the "unknown" frame */
1124 /* provide Tachyon with another set of buffers */
1125 if (offset == (NO_OF_ENTRIES - 1))
1126 update_SFSBQ_indx(fi);
1127 LEAVE("handle_Unknown_Frame_interrupt");
1130 static void handle_Busied_Frame_interrupt(struct fc_info *fi)
1132 u_int *ptr_imq_entry;
1133 int queue_indx, offset;
1134 ENTER("handle_Busied_Frame_interrupt");
1135 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1136 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1137 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1138 queue_indx = queue_indx >> 16;
1139 /* We discard the "busied" frame */
1140 /* provide Tachyon with another set of buffers */
1141 if (offset == (NO_OF_ENTRIES - 1))
1142 update_SFSBQ_indx(fi);
1143 LEAVE("handle_Busied_Frame_interrupt");
1146 static void handle_Bad_SCSI_Frame_interrupt(struct fc_info *fi)
1148 u_int *ptr_imq_entry, *buff_addr, *tach_header, *ptr_edb;
1149 u_int s_id, rctl, frame_class, burst_len, transfered_len, len = 0;
1150 int queue_indx, offset, payload_size, i;
1151 u_short ox_id, rx_id, x_id, mtu = 512;
1152 u_char target_id = 0xFF;
1154 ENTER("handle_Bad_SCSI_Frame_interrupt");
1155 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1156 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1157 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1158 queue_indx = queue_indx >> 16;
1159 payload_size = ntohl(*(ptr_imq_entry + 2));
1161 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1163 rctl = ntohl(*(buff_addr + 2)) & 0xFF000000;
1164 s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1165 ox_id = ntohl(*(buff_addr + 6)) >> 16;
1166 rx_id = ntohl(*(buff_addr + 6));
1167 x_id = ox_id & MAX_SCSI_XID;
1169 /* Any frame that comes in with an OX_ID that matches an OX_ID
1170 * that has been allocated for SCSI will be called a Bad
1171 * SCSI frame if the Exchange is no longer valid.
1173 * We will also get a Bad SCSI frame interrupt if we receive
1174 * an XFER_RDY with offset != 0. Tachyon washes its hands of
1175 * this Exchange; we have to take care of it ourselves. Grrr...
1177 if (rctl == DATA_DESCRIPTOR) {
1178 struct fc_node_info *q = fi->node_info_list;
1179 while (q != NULL) {
1180 if (q->d_id == s_id) {
1181 target_id = q->target_id;
1182 mtu = q->mtu;
1183 break;
1185 else
1186 q = q->next;
1188 frame_class = target_id;
1189 transfered_len = ntohl(*(buff_addr + 8));
1190 burst_len = ntohl(*(buff_addr + 9));
1192 build_ODB(fi, fi->g.seq_id, s_id, burst_len, 0, mtu, ox_id, rx_id, 0, 0, frame_class << 16);
1193 /* Update the SEQ_ID and Relative Offset in the
1194 * Tachyon Header Structure.
1196 tach_header = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 5)));
1197 *(tach_header + 5) = htonl(fi->g.seq_id << 24);
1198 *(tach_header + 7) = htonl(transfered_len);
1199 fi->g.odb.hdr_addr = *(fi->q.ptr_sest[x_id] + 5);
1201 /* Invalidate the EDBs used
1203 ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
1205 for (i = 0; i < EDB_LEN; i++)
1206 if (fi->q.ptr_edb[i] == ptr_edb)
1207 break;
1208 ptr_edb--;
1210 if (i < EDB_LEN) {
1211 int j;
1212 do {
1213 ptr_edb += 2;
1214 len += (htonl(*ptr_edb) & 0xFFFF);
1215 j = i;
1216 fi->q.free_edb_list[i++] = EDB_FREE;
1217 if (i == EDB_LEN) {
1218 i = 0;
1219 ptr_edb = fi->q.ptr_edb_base - 1;
1221 } while (len < transfered_len);
1222 if (len > transfered_len) {
1223 ptr_edb--;
1224 fi->q.free_edb_list[j] = EDB_BUSY;
1226 else
1227 ptr_edb++;
1229 else {
1230 T_MSG("EDB not found while freeing");
1231 if (offset == (NO_OF_ENTRIES - 1))
1232 update_SFSBQ_indx(fi);
1233 return;
1236 /* Update the EDB pointer in the ODB.
1238 fi->g.odb.edb_addr = htonl(virt_to_bus(ptr_edb));
1239 memcpy(fi->q.ptr_odb[fi->q.ocq_prod_indx], &(fi->g.odb), sizeof(ODB));
1240 /* Update the EDB pointer in the SEST entry. We might need
1241 * this if we get another XFER_RDY for the same Exchange.
1243 *(fi->q.ptr_sest[x_id] + 7) = htonl(virt_to_bus(ptr_edb));
1245 update_OCQ_indx(fi);
1246 if (fi->g.seq_id == MAX_SEQ_ID)
1247 fi->g.seq_id = 0;
1248 else
1249 fi->g.seq_id++;
1251 else
1252 /* Could be a BA_ACC or a BA_RJT.
1254 if (rctl == RCTL_BASIC_ACC) {
1255 u_int bls_type = remove_from_ox_id_list(fi, ox_id);
1256 DPRINTK1("BA_ACC received from S_ID 0x%x with OX_ID = %x in response to %x", s_id, ox_id, bls_type);
1257 if (bls_type == RCTL_BASIC_ABTS) {
1258 u_int STE_bit;
1259 /* Invalidate resources for that Exchange.
1261 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
1262 if (STE_bit & SEST_V) {
1263 *(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
1264 invalidate_SEST_entry(fi, ox_id);
1268 else
1269 if (rctl == RCTL_BASIC_RJT) {
1270 u_int bls_type = remove_from_ox_id_list(fi, ox_id);
1271 DPRINTK1("BA_RJT received from S_ID 0x%x with OX_ID = %x in response to %x", s_id, ox_id, bls_type);
1272 if (bls_type == RCTL_BASIC_ABTS) {
1273 u_int STE_bit;
1274 /* Invalidate resources for that Exchange.
1276 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
1277 if (STE_bit & SEST_V) {
1278 *(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
1279 invalidate_SEST_entry(fi, ox_id);
1283 else
1284 DPRINTK1("Frame with R_CTL = %x received from S_ID 0x%x with OX_ID %x", rctl, s_id, ox_id);
1286 /* Else, discard the "Bad" SCSI frame.
1289 /* provide Tachyon with another set of buffers
1291 if (offset == (NO_OF_ENTRIES - 1))
1292 update_SFSBQ_indx(fi);
1293 LEAVE("handle_Bad_SCSI_Frame_interrupt");
1296 static void handle_Inbound_SCSI_Status_interrupt(struct fc_info *fi)
1298 struct Scsi_Host *host = fi->host;
1299 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
1300 u_int *ptr_imq_entry, *buff_addr, *ptr_rsp_info, *ptr_sense_info = NULL;
1301 int queue_indx, offset, payload_size;
1302 u_short received_ox_id, x_id;
1303 Scsi_Cmnd *Cmnd;
1304 u_int fcp_status, fcp_rsp_info_len = 0, fcp_sense_info_len = 0, s_id;
1305 ENTER("handle_SCSI_status_interrupt");
1307 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1308 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1309 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1310 queue_indx = queue_indx >> 16;
1311 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1312 payload_size = ntohl(*(ptr_imq_entry + 2));
1313 received_ox_id = ntohl(*(buff_addr + 6)) >> 16;
1315 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1317 fcp_status = ntohl(*(buff_addr + 10));
1318 ptr_rsp_info = buff_addr + 14;
1319 if (fcp_status & FCP_STATUS_RSP_LEN)
1320 fcp_rsp_info_len = ntohl(*(buff_addr + 13));
1322 if (fcp_status & FCP_STATUS_SENSE_LEN) {
1323 ptr_sense_info = ptr_rsp_info + fcp_rsp_info_len / 4;
1324 fcp_sense_info_len = ntohl(*(buff_addr + 12));
1325 DPRINTK("sense_info = %x", (u_int)ntohl(*ptr_sense_info));
1327 DPRINTK("fcp_status = %x, fcp_rsp_len = %x", fcp_status, fcp_rsp_info_len);
1328 x_id = received_ox_id & MAX_SCSI_XID;
1329 Cmnd = hostdata->cmnd_handler[x_id];
1330 hostdata->cmnd_handler[x_id] = NULL;
1331 if (Cmnd != NULL) {
1332 memset(Cmnd->sense_buffer, 0, sizeof(Cmnd->sense_buffer));
1333 /* Check if there is a Sense field */
1334 if (fcp_status & FCP_STATUS_SENSE_LEN) {
1335 int size = sizeof(Cmnd->sense_buffer);
1336 if (fcp_sense_info_len < size)
1337 size = fcp_sense_info_len;
1338 memcpy(Cmnd->sense_buffer, (char *)ptr_sense_info, size);
1340 Cmnd->result = fcp_status & FCP_STATUS_MASK;
1341 (*Cmnd->scsi_done) (Cmnd);
1343 else
1344 T_MSG("NULL Command out of handler!");
1346 invalidate_SEST_entry(fi, received_ox_id);
1347 s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1348 fi->q.free_scsi_oxid[x_id] = OXID_AVAILABLE;
1350 /* provide Tachyon with another set of buffers */
1351 if (offset == (NO_OF_ENTRIES - 1))
1352 update_SFSBQ_indx(fi);
1353 LEAVE("handle_SCSI_status_interrupt");
1356 static void invalidate_SEST_entry(struct fc_info *fi, u_short received_ox_id)
1358 u_short x_id = received_ox_id & MAX_SCSI_XID;
1359 /* Invalidate SEST entry if it is an OutBound SEST Entry
1361 if (!(received_ox_id & SCSI_READ_BIT)) {
1362 u_int *ptr_tach_header, *ptr_edb;
1363 u_short temp_ox_id = NOT_SCSI_XID;
1364 int i;
1365 *(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
1367 /* Invalidate the Tachyon Header structure
1369 ptr_tach_header = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 5)));
1370 for (i = 0; i < NO_OF_TACH_HEADERS; i++)
1371 if(fi->q.ptr_tachyon_header[i] == ptr_tach_header)
1372 break;
1373 if (i < NO_OF_TACH_HEADERS)
1374 memset(ptr_tach_header, 0xFF, 32);
1375 else
1376 T_MSG("Tachyon Header not found while freeing in invalidate_SEST_entry()");
1378 /* Invalidate the EDB used
1380 ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
1381 for (i = 0; i < EDB_LEN; i++)
1382 if (fi->q.ptr_edb[i] == ptr_edb)
1383 break;
1384 ptr_edb--;
1385 if (i < EDB_LEN) {
1386 do {
1387 ptr_edb += 2;
1388 fi->q.free_edb_list[i++] = EDB_FREE;
1389 if (i == EDB_LEN) {
1390 i = 0;
1391 ptr_edb = fi->q.ptr_edb_base - 1;
1393 } while ((htonl(*ptr_edb) & 0x80000000) != 0x80000000);
1395 else
1396 T_MSG("EDB not found while freeing in invalidate_SEST_entry()");
1398 /* Search for its other header structure and destroy it!
1400 if ((ptr_tach_header + 16) < (fi->q.ptr_tachyon_header_base + (MY_PAGE_SIZE/4)))
1401 ptr_tach_header += 16;
1402 else
1403 ptr_tach_header = fi->q.ptr_tachyon_header_base;
1404 while (temp_ox_id != x_id) {
1405 temp_ox_id = ntohl(*(ptr_tach_header + 6)) >> 16;
1406 if (temp_ox_id == x_id) {
1407 /* Paranoid checking...
1409 for (i = 0; i < NO_OF_TACH_HEADERS; i++)
1410 if(fi->q.ptr_tachyon_header[i] == ptr_tach_header)
1411 break;
1412 if (i < NO_OF_TACH_HEADERS)
1413 memset(ptr_tach_header, 0xFF, 32);
1414 else
1415 T_MSG("Tachyon Header not found while freeing in invalidate_SEST_entry()");
1416 break;
1418 else {
1419 if ((ptr_tach_header + 16) < (fi->q.ptr_tachyon_header_base + (MY_PAGE_SIZE/4)))
1420 ptr_tach_header += 16;
1421 else
1422 ptr_tach_header = fi->q.ptr_tachyon_header_base;
1426 else {
1427 u_short sdb_table_indx;
1428 /* An Inbound Command has completed or needs to be Aborted.
1429 * Clear up the SDB buffers.
1431 sdb_table_indx = *(fi->q.ptr_sest[x_id] + 5);
1432 fi->q.sdb_slot_status[sdb_table_indx] = SDB_FREE;
1436 static void handle_Inbound_SCSI_Command_interrupt(struct fc_info *fi)
1438 u_int *ptr_imq_entry;
1439 int queue_indx, offset;
1440 ENTER("handle_Inbound_SCSI_Command_interrupt");
1441 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1442 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1443 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1444 queue_indx = queue_indx >> 16;
1445 /* We discard the SCSI frame as we shouldn't be receiving
1446 * a SCSI Command in the first place
1448 /* provide Tachyon with another set of buffers */
1449 if (offset == (NO_OF_ENTRIES - 1))
1450 update_SFSBQ_indx(fi);
1451 LEAVE("handle_Inbound_SCSI_Command_interrupt");
1454 static void handle_SFS_interrupt(struct fc_info *fi)
1456 u_int *ptr_imq_entry, *buff_addr;
1457 u_int class_of_frame, type_of_frame, s_id, els_type = 0, rctl;
1458 int queue_indx, offset, payload_size, login_state;
1459 u_short received_ox_id, fs_cmnd_code;
1460 ENTER("handle_SFS_interrupt");
1461 ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
1462 offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
1463 queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
1464 queue_indx = queue_indx >> 16;
1465 DPRINTK("queue_indx = %d, offset = %d\n", queue_indx, offset);
1466 payload_size = ntohl(*(ptr_imq_entry + 2));
1467 DPRINTK("payload_size = %d", payload_size);
1469 buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
1471 /* extract Type of Frame */
1472 type_of_frame = ntohl(*(buff_addr + 4)) & 0xFF000000;
1473 s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
1474 received_ox_id = ntohl(*(buff_addr + 6)) >> 16;
1475 switch(type_of_frame) {
1476 case TYPE_BLS:
1477 rctl = ntohl(*(buff_addr + 2)) & 0xFF000000;
1478 switch(rctl) {
1479 case RCTL_BASIC_ABTS:
1480 /* As an Initiator, we should never be receiving
1481 * this.
1483 DPRINTK1("ABTS received from S_ID 0x%x with OX_ID = %x", s_id, received_ox_id);
1484 break;
1486 break;
1487 case TYPE_ELS:
1488 class_of_frame = ntohl(*(buff_addr + 8));
1489 login_state = sid_logged_in(fi, s_id);
1490 switch(class_of_frame & 0xFF000000) {
1491 case ELS_PLOGI:
1492 if (s_id != fi->g.my_id) {
1493 u_int ret_code;
1494 DPRINTK1("PLOGI received from D_ID 0x%x with 0X_ID = %x", s_id, received_ox_id);
1495 if ((ret_code = plogi_ok(fi, buff_addr, payload_size)) == 0){
1496 tx_logi_acc(fi, ELS_ACC, s_id, received_ox_id);
1497 add_to_address_cache(fi, buff_addr);
1499 else {
1500 u_short cmnd_code = ret_code >> 16;
1501 u_short expln_code = ret_code;
1502 tx_ls_rjt(fi, s_id, received_ox_id, cmnd_code, expln_code);
1505 break;
1506 case ELS_ACC:
1507 els_type = remove_from_ox_id_list(fi, received_ox_id);
1508 DPRINTK1("ELS_ACC received from D_ID 0x%x in response to ELS %x", s_id, els_type);
1509 switch(els_type) {
1510 case ELS_PLOGI:
1511 add_to_address_cache(fi, buff_addr);
1512 tx_prli(fi, ELS_PRLI, s_id, OX_ID_FIRST_SEQUENCE);
1513 break;
1514 case ELS_FLOGI:
1515 add_to_address_cache(fi, buff_addr);
1516 fi->g.my_id = ntohl(*(buff_addr + 2)) & 0x00FFFFFF;
1517 fi->g.fabric_present = TRUE;
1518 fi->g.my_ddaa = fi->g.my_id & 0xFFFF00;
1519 /* Login to the Name Server
1521 tx_logi(fi, ELS_PLOGI, DIRECTORY_SERVER);
1522 break;
1523 case ELS_NS_PLOGI:
1524 fi->g.name_server = TRUE;
1525 add_to_address_cache(fi, buff_addr);
1526 tx_name_server_req(fi, FCS_RFC_4);
1527 tx_scr(fi);
1528 /* Some devices have a delay before
1529 * registering with the Name Server
1531 udelay(500);
1532 tx_name_server_req(fi, FCS_GP_ID4);
1533 break;
1534 case ELS_PRLI:
1535 mark_scsi_sid(fi, buff_addr, ADD_ENTRY);
1536 break;
1537 case ELS_ADISC:
1538 if (!(validate_login(fi, buff_addr)))
1539 tx_logo(fi, s_id, OX_ID_FIRST_SEQUENCE);
1540 break;
1542 break;
1543 case ELS_PDISC:
1544 DPRINTK1("ELS_PDISC received from D_ID 0x%x", s_id);
1545 tx_logo(fi, s_id, received_ox_id);
1546 break;
1547 case ELS_ADISC:
1548 DPRINTK1("ELS_ADISC received from D_ID 0x%x", s_id);
1549 if (node_logged_in_prev(fi, buff_addr))
1550 tx_adisc(fi, ELS_ACC, s_id, received_ox_id);
1551 else
1552 tx_logo(fi, s_id, received_ox_id);
1553 break;
1554 case ELS_PRLI:
1555 DPRINTK1("ELS_PRLI received from D_ID 0x%x", s_id);
1556 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN)) {
1557 tx_prli(fi, ELS_ACC, s_id, received_ox_id);
1558 mark_scsi_sid(fi, buff_addr, ADD_ENTRY);
1560 else
1561 tx_logo(fi, s_id, received_ox_id);
1562 break;
1563 case ELS_PRLO:
1564 DPRINTK1("ELS_PRLO received from D_ID 0x%x", s_id);
1565 if ((login_state == NODE_LOGGED_OUT) || (login_state == NODE_NOT_PRESENT))
1566 tx_logo(fi, s_id, received_ox_id);
1567 else
1568 if (login_state == NODE_LOGGED_IN)
1570 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1571 else
1572 if (login_state == NODE_PROCESS_LOGGED_IN) {
1573 tx_prli(fi, ELS_ACC, s_id, received_ox_id);
1574 mark_scsi_sid(fi, buff_addr, DELETE_ENTRY);
1576 break;
1577 case ELS_LS_RJT:
1578 els_type = remove_from_ox_id_list(fi, received_ox_id);
1579 DPRINTK1("ELS_LS_RJT received from D_ID 0x%x in response to %x", s_id, els_type);
1580 /* We should be checking the reason code.
1582 switch (els_type) {
1583 case ELS_ADISC:
1584 tx_logi(fi, ELS_PLOGI, s_id);
1585 break;
1587 break;
1588 case ELS_LOGO:
1589 els_type = remove_from_ox_id_list(fi, received_ox_id);
1590 DPRINTK1("ELS_LOGO received from D_ID 0x%x in response to %x", s_id, els_type);
1591 remove_from_address_cache(fi, buff_addr, ELS_LOGO);
1592 tx_acc(fi, s_id, received_ox_id);
1593 if (els_type == ELS_ADISC)
1594 tx_logi(fi, ELS_PLOGI, s_id);
1595 break;
1596 case ELS_RSCN:
1597 DPRINTK1("ELS_RSCN received from D_ID 0x%x", s_id);
1598 tx_acc(fi, s_id, received_ox_id);
1599 remove_from_address_cache(fi, buff_addr, ELS_RSCN);
1600 break;
1601 case ELS_FARP_REQ:
1602 /* We do not support FARP.
1603 So, silently discard it */
1604 DPRINTK1("ELS_FARP_REQ received from D_ID 0x%x", s_id);
1605 break;
1606 case ELS_ABTX:
1607 DPRINTK1("ELS_ABTX received from D_ID 0x%x", s_id);
1608 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1609 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1610 else
1611 tx_logo(fi, s_id, received_ox_id);
1612 break;
1613 case ELS_FLOGI:
1614 DPRINTK1("ELS_FLOGI received from D_ID 0x%x", s_id);
1615 if (fi->g.ptp_up == TRUE) {
1616 /* The node could have come up as an N_Port
1617 * in a Loop! So, try initializing as an NL_port
1619 take_tachyon_offline(fi);
1620 /* write AL_TIME & E_D_TOV into the registers */
1621 writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
1622 writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);
1623 DPRINTK1("FLOGI received, TACHYON initializing as L_Port...\n");
1624 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
1626 else {
1627 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1628 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1629 else
1630 tx_logo(fi, s_id, received_ox_id);
1632 break;
1633 case ELS_ADVC:
1634 DPRINTK1("ELS_ADVC received from D_ID 0x%x", s_id);
1635 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1636 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1637 else
1638 tx_logo(fi, s_id, received_ox_id);
1639 break;
1640 case ELS_ECHO:
1641 DPRINTK1("ELS_ECHO received from D_ID 0x%x", s_id);
1642 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1643 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1644 else
1645 tx_logo(fi, s_id, received_ox_id);
1646 break;
1647 case ELS_ESTC:
1648 DPRINTK1("ELS_ESTC received from D_ID 0x%x", s_id);
1649 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1650 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1651 else
1652 tx_logo(fi, s_id, received_ox_id);
1653 break;
1654 case ELS_ESTS:
1655 DPRINTK1("ELS_ESTS received from D_ID 0x%x", s_id);
1656 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1657 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1658 else
1659 tx_logo(fi, s_id, received_ox_id);
1660 break;
1661 case ELS_RCS:
1662 DPRINTK1("ELS_RCS received from D_ID 0x%x", s_id);
1663 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1664 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1665 else
1666 tx_logo(fi, s_id, received_ox_id);
1667 break;
1668 case ELS_RES:
1669 DPRINTK1("ELS_RES received from D_ID 0x%x", s_id);
1670 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1671 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1672 else
1673 tx_logo(fi, s_id, received_ox_id);
1674 break;
1675 case ELS_RLS:
1676 DPRINTK1("ELS_RLS received from D_ID 0x%x", s_id);
1677 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1678 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1679 else
1680 tx_logo(fi, s_id, received_ox_id);
1681 break;
1682 case ELS_RRQ:
1683 DPRINTK1("ELS_RRQ received from D_ID 0x%x", s_id);
1684 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1685 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1686 else
1687 tx_logo(fi, s_id, received_ox_id);
1688 break;
1689 case ELS_RSS:
1690 DPRINTK1("ELS_RSS received from D_ID 0x%x", s_id);
1691 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1692 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1693 else
1694 tx_logo(fi, s_id, received_ox_id);
1695 break;
1696 case ELS_RTV:
1697 DPRINTK1("ELS_RTV received from D_ID 0x%x", s_id);
1698 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1699 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1700 else
1701 tx_logo(fi, s_id, received_ox_id);
1702 break;
1703 case ELS_RSI:
1704 DPRINTK1("ELS_RSI received from D_ID 0x%x", s_id);
1705 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1706 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1707 else
1708 tx_logo(fi, s_id, received_ox_id);
1709 break;
1710 case ELS_TEST:
1711 /* No reply sequence */
1712 DPRINTK1("ELS_TEST received from D_ID 0x%x", s_id);
1713 break;
1714 case ELS_RNC:
1715 DPRINTK1("ELS_RNC received from D_ID 0x%x", s_id);
1716 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1717 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1718 else
1719 tx_logo(fi, s_id, received_ox_id);
1720 break;
1721 case ELS_RVCS:
1722 DPRINTK1("ELS_RVCS received from D_ID 0x%x", s_id);
1723 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1724 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1725 else
1726 tx_logo(fi, s_id, received_ox_id);
1727 break;
1728 case ELS_TPLS:
1729 DPRINTK1("ELS_TPLS received from D_ID 0x%x", s_id);
1730 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1731 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1732 else
1733 tx_logo(fi, s_id, received_ox_id);
1734 break;
1735 case ELS_GAID:
1736 DPRINTK1("ELS_GAID received from D_ID 0x%x", s_id);
1737 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1738 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1739 else
1740 tx_logo(fi, s_id, received_ox_id);
1741 break;
1742 case ELS_FACT:
1743 DPRINTK1("ELS_FACT received from D_ID 0x%x", s_id);
1744 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1745 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1746 else
1747 tx_logo(fi, s_id, received_ox_id);
1748 break;
1749 case ELS_FAN:
1750 /* Hmmm... You don't support FAN ??? */
1751 DPRINTK1("ELS_FAN received from D_ID 0x%x", s_id);
1752 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1753 break;
1754 case ELS_FDACT:
1755 DPRINTK1("ELS_FDACT received from D_ID 0x%x", s_id);
1756 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1757 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1758 else
1759 tx_logo(fi, s_id, received_ox_id);
1760 break;
1761 case ELS_NACT:
1762 DPRINTK1("ELS_NACT received from D_ID 0x%x", s_id);
1763 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1764 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1765 else
1766 tx_logo(fi, s_id, received_ox_id);
1767 break;
1768 case ELS_NDACT:
1769 DPRINTK1("ELS_NDACT received from D_ID 0x%x", s_id);
1770 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1771 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1772 else
1773 tx_logo(fi, s_id, received_ox_id);
1774 break;
1775 case ELS_QoSR:
1776 DPRINTK1("ELS_QoSR received from D_ID 0x%x", s_id);
1777 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1778 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1779 else
1780 tx_logo(fi, s_id, received_ox_id);
1781 break;
1782 case ELS_FDISC:
1783 DPRINTK1("ELS_FDISC received from D_ID 0x%x", s_id);
1784 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1785 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1786 else
1787 tx_logo(fi, s_id, received_ox_id);
1788 break;
1789 default:
1790 DPRINTK1("ELS Frame %x received from D_ID 0x%x", class_of_frame, s_id);
1791 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
1792 tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
1793 else
1794 tx_logo(fi, s_id, received_ox_id);
1795 break;
1797 break;
1798 case TYPE_FC_SERVICES:
1799 fs_cmnd_code = (ntohl(*(buff_addr + 10)) & 0xFFFF0000) >>16;
1800 switch(fs_cmnd_code) {
1801 case FCS_ACC:
1802 els_type = remove_from_ox_id_list(fi, received_ox_id);
1803 DPRINTK1("FCS_ACC received from D_ID 0x%x in response to %x", s_id, els_type);
1804 if (els_type == FCS_GP_ID4)
1805 explore_fabric(fi, buff_addr);
1806 break;
1807 case FCS_REJECT:
1808 DPRINTK1("FCS_REJECT received from D_ID 0x%x in response to %x", s_id, els_type);
1809 break;
1811 break;
1812 case TYPE_LLC_SNAP:
1813 rx_net_packet(fi, (u_char *)buff_addr, payload_size);
1814 break;
1815 default:
1816 T_MSG("Frame Type %x received from %x", type_of_frame, s_id);
1819 /* provide Tachyon with another set of buffers */
1820 if (offset == (NO_OF_ENTRIES - 1))
1821 update_SFSBQ_indx(fi);
1822 LEAVE("handle_SFS_interrupt");
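/* Handle Frame Manager (link) status interrupts. On Link Down, stop the
 * driver timers, mark all cached nodes as needing a fresh login and
 * re-initialize the loop. On Link Up, pick up the acquired AL_PA (loop) or
 * try to come up as an N_Port / send FLOGI (old-port), then start port
 * discovery or fabric exploration. Also deals with Elastic Store errors,
 * NOS/OLS, loop-state timeouts, Bad AL_PAs, LIP(F8), link failure and
 * transmit parity errors reported by Tachyon.
 */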
1825 static void handle_FM_interrupt(struct fc_info *fi)
1827 u_int fm_status;
1828 u_int tachyon_status;
1830 ENTER("handle_FM_interrupt");
1831 fm_status = readl(fi->t_r.ptr_fm_status_reg);
1832 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
1833 DPRINTK("FM_status = %x, Tachyon_status = %x", fm_status, tachyon_status);
1834 if (fm_status & LINK_DOWN) {
1835 T_MSG("Fibre Channel Link DOWN");
1836 fm_status = readl(fi->t_r.ptr_fm_status_reg);
1838 del_timer(&fi->explore_timer);
1839 del_timer(&fi->nport_timer);
1840 del_timer(&fi->lport_timer);
1841 del_timer(&fi->display_cache_timer);
1842 fi->g.link_up = FALSE;
1843 if (fi->g.ptp_up == TRUE)
1844 fi->g.n_port_try = FALSE;
1845 fi->g.ptp_up = FALSE;
1846 fi->g.port_discovery = FALSE;
1847 fi->g.explore_fabric = FALSE;
1848 fi->g.perform_adisc = FALSE;
1850 /* Logout with all nodes */
1851 if (fi->node_info_list) {
1852 struct fc_node_info *temp_list = fi->node_info_list;
1853 while(temp_list) {
1854 temp_list->login = LOGIN_ATTEMPTED;
1855 temp_list = temp_list->next;
1857 fi->num_nodes = 0;
1860 if ((fi->g.n_port_try == FALSE) && (fi->g.dont_init == FALSE)){
1861 take_tachyon_offline(fi);
1862 /* write AL_TIME & E_D_TOV into the registers */
1863 writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
1865 if ((fi->g.fabric_present == TRUE) && (fi->g.loop_up == TRUE)) {
1866 u_int al_pa = fi->g.my_id & 0xFF;
1867 writel((al_pa << 24) | LOOP_INIT_FABRIC_ADDRESS | LOOP_INIT_PREVIOUS_ADDRESS, fi->t_r.ptr_fm_config_reg);
1869 else
1870 if (fi->g.loop_up == TRUE) {
1871 u_int al_pa = fi->g.my_id & 0xFF;
1872 writel((al_pa << 24) | LOOP_INIT_PREVIOUS_ADDRESS, fi->t_r.ptr_fm_config_reg);
1874 else
1875 writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);
1876 fi->g.loop_up = FALSE;
1877 DPRINTK1("In LDWN TACHYON initializing as L_Port...\n");
1878 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
1882 if (fm_status & NON_PARTICIPATING) {
1883 T_MSG("Did not acquire an AL_PA. I am not participating");
1885 else
1886 if ((fm_status & LINK_UP) && ((fm_status & LINK_DOWN) == 0)) {
1887 T_MSG("Fibre Channel Link UP");
1888 if ((fm_status & NON_PARTICIPATING) != TRUE) {
1889 fi->g.link_up = TRUE;
1890 if (tachyon_status & OSM_FROZEN) {
1891 reset_tachyon(fi, ERROR_RELEASE);
1892 reset_tachyon(fi, OCQ_RESET);
1894 init_timer(&fi->explore_timer);
1895 init_timer(&fi->nport_timer);
1896 init_timer(&fi->lport_timer);
1897 init_timer(&fi->display_cache_timer);
1898 if ((fm_status & OLD_PORT) == 0) {
1899 fi->g.loop_up = TRUE;
1900 fi->g.ptp_up = FALSE;
1901 fi->g.my_id = readl(fi->t_r.ptr_fm_config_reg) >> 24;
1902 DPRINTK1("My AL_PA = %x", fi->g.my_id);
1903 fi->g.port_discovery = TRUE;
1904 fi->g.explore_fabric = FALSE;
1906 else
1907 if (((fm_status & 0xF0) == OLD_PORT) && ((fm_status & 0x0F) == PORT_STATE_ACTIVE)) {
1908 fi->g.loop_up = FALSE;
1909 fi->g.my_id = 0x0;
1910 /* In a point-to-point configuration, we expect to be
1911 * connected to an F_Port. This driver does not yet support
1912 * a configuration where it is connected to another N_Port
1913 * directly. */
1915 fi->g.explore_fabric = TRUE;
1916 fi->g.port_discovery = FALSE;
1917 if (fi->g.n_port_try == FALSE) {
1918 take_tachyon_offline(fi);
1919 /* write R_T_TOV & E_D_TOV into the registers */
1920 writel(PTP_TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
1921 writel(BB_CREDIT | NPORT, fi->t_r.ptr_fm_config_reg);
1922 fi->g.n_port_try = TRUE;
1923 DPRINTK1("In LUP TACHYON initializing as N_Port...\n");
1924 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
1926 else {
1927 fi->g.ptp_up = TRUE;
1928 tx_logi(fi, ELS_FLOGI, F_PORT);
1931 fi->g.my_ddaa = 0x0;
1932 fi->g.fabric_present = FALSE;
1933 /* We haven't sent out any Name Server Reqs */
1934 fi->g.name_server = FALSE;
1935 fi->g.alpa_list_index = 0;
1936 fi->g.ox_id = NOT_SCSI_XID;
1937 fi->g.my_mtu = FRAME_SIZE;
1939 /* Implicitly LOGO with all logged-in nodes. */
1941 if (fi->node_info_list) {
1942 struct fc_node_info *temp_list = fi->node_info_list;
1943 while(temp_list) {
1944 temp_list->login = LOGIN_ATTEMPTED;
1945 temp_list = temp_list->next;
1947 fi->num_nodes = 0;
1948 fi->g.perform_adisc = TRUE;
1949 //fi->g.perform_adisc = FALSE;
1950 fi->g.port_discovery = FALSE;
1951 tx_logi(fi, ELS_FLOGI, F_PORT);
1953 else {
1954 /* If the Link is coming up for the _first_ time, or no nodes
1955 * were logged in before... */
1957 fi->g.scsi_oxid = 0;
1958 fi->g.seq_id = 0x00;
1959 fi->g.perform_adisc = FALSE;
1962 /* reset OX_ID table */
1963 while (fi->ox_id_list) {
1964 struct ox_id_els_map *temp = fi->ox_id_list;
1965 fi->ox_id_list = fi->ox_id_list->next;
1966 kfree(temp);
1968 fi->ox_id_list = NULL;
1969 } /* End of if participating */
1972 if (fm_status & ELASTIC_STORE_ERROR) {
1973 /* Too much junk on the Link. */
1975 /* Trying to clear it up by Txing PLOGI to ourselves */
1976 if (fi->g.link_up == TRUE)
1977 tx_logi(fi, ELS_PLOGI, fi->g.my_id);
1980 if (fm_status & LOOP_UP) {
1981 if (tachyon_status & OSM_FROZEN) {
1982 reset_tachyon(fi, ERROR_RELEASE);
1983 reset_tachyon(fi, OCQ_RESET);
1987 if (fm_status & NOS_OLS_RECEIVED){
1988 if (fi->g.nport_timer_set == FALSE) {
1989 DPRINTK("NOS/OLS Received");
1990 DPRINTK("FM_status = %x", fm_status);
1991 fi->nport_timer.function = nos_ols_timer;
1992 fi->nport_timer.data = (unsigned long)fi;
1993 fi->nport_timer.expires = RUN_AT((3*HZ)/100); /* 30 msec */
1994 init_timer(&fi->nport_timer);
1995 add_timer(&fi->nport_timer);
1996 fi->g.nport_timer_set = TRUE;
2000 if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_LF1) || ((fm_status & 0x0F) == PORT_STATE_LF2))) {
2001 DPRINTK1("Link Fail-I in OLD-PORT.");
2002 take_tachyon_offline(fi);
2003 reset_tachyon(fi, SOFTWARE_RESET);
2006 if (fm_status & LOOP_STATE_TIMEOUT){
2007 if ((fm_status & 0xF0) == ARBITRATING)
2008 DPRINTK1("E_D_TOV timed out in ARBITRATING state...");
2009 if ((fm_status & 0xF0) == ARB_WON)
2010 DPRINTK1("E_D_TOV timed out in ARBITRATION WON state...");
2011 if ((fm_status & 0xF0) == OPEN)
2012 DPRINTK1("E_D_TOV timed out in OPEN state...");
2013 if ((fm_status & 0xF0) == OPENED)
2014 DPRINTK1("E_D_TOV timed out in OPENED state...");
2015 if ((fm_status & 0xF0) == TX_CLS)
2016 DPRINTK1("E_D_TOV timed out in XMITTED CLOSE state...");
2017 if ((fm_status & 0xF0) == RX_CLS)
2018 DPRINTK1("E_D_TOV timed out in RECEIVED CLOSE state...");
2019 if ((fm_status & 0xF0) == INITIALIZING)
2020 DPRINTK1("E_D_TOV timed out in INITIALIZING state...");
2021 DPRINTK1("Initializing Loop...");
2022 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
2025 if ((fm_status & BAD_ALPA) && (fi->g.loop_up == TRUE)) {
2026 u_char bad_alpa = (readl(fi->t_r.ptr_fm_rx_al_pa_reg) & 0xFF00) >> 8;
2027 if (tachyon_status & OSM_FROZEN) {
2028 reset_tachyon(fi, ERROR_RELEASE);
2029 reset_tachyon(fi, OCQ_RESET);
2031 /* Fix for B34 */
2032 tx_logi(fi, ELS_PLOGI, fi->g.my_id);
2034 if (!fi->g.port_discovery && !fi->g.perform_adisc) {
2035 if (bad_alpa != 0xFE)
2036 DPRINTK("Bad AL_PA = %x", bad_alpa);
2038 else {
2039 if ((fi->g.perform_adisc == TRUE) && (bad_alpa == 0x00)) {
2040 DPRINTK1("Performing ADISC...");
2041 fi->g.fabric_present = FALSE;
2042 perform_adisc(fi);
2047 if (fm_status & LIPF_RECEIVED){
2048 DPRINTK("LIP(F8) Received");
2051 if (fm_status & LINK_FAILURE) {
2052 if (fm_status & LOSS_OF_SIGNAL)
2053 DPRINTK1("Detected Loss of Signal.");
2054 if (fm_status & OUT_OF_SYNC)
2055 DPRINTK1("Detected Loss of Synchronization.");
2058 if (fm_status & TRANSMIT_PARITY_ERROR) {
2059 /* Bad! Should not happen. Solution -> Hard Reset. */
2061 T_MSG("Parity Error. Perform Hard Reset!");
2064 if (fi->g.alpa_list_index >= MAX_NODES){
2065 if (fi->g.port_discovery == TRUE) {
2066 fi->g.port_discovery = FALSE;
2067 add_display_cache_timer(fi);
2069 fi->g.alpa_list_index = MAX_NODES;
2072 if (fi->g.port_discovery == TRUE)
2073 local_port_discovery(fi);
2075 LEAVE("handle_FM_interrupt");
2076 return;
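/* Walk the AL_PA list on the local loop: send FLOGI on the first pass if no
 * fabric has been seen, otherwise PLOGI the next AL_PA that is not already
 * logged in (and is not our own address). Port discovery ends, and the
 * display-cache timer is started, once the list is exhausted.
 */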
2079 static void local_port_discovery(struct fc_info *fi)
2081 if (fi->g.loop_up == TRUE) {
2082 /* If this is not here, some of the Bad AL_PAs are missed. */
2084 udelay(20);
2085 if ((fi->g.alpa_list_index == 0) && (fi->g.fabric_present == FALSE)){
2086 tx_logi(fi, ELS_FLOGI, F_PORT);
2088 else {
2089 int login_state = sid_logged_in(fi, fi->g.my_ddaa | alpa_list[fi->g.alpa_list_index]);
2090 while ((fi->g.alpa_list_index == 0) || ((fi->g.alpa_list_index < MAX_NODES) && ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN) || (alpa_list[fi->g.alpa_list_index] == (fi->g.my_id & 0xFF)))))
2091 fi->g.alpa_list_index++;
2092 if (fi->g.alpa_list_index < MAX_NODES)
2093 tx_logi(fi, ELS_PLOGI, alpa_list[fi->g.alpa_list_index]);
2095 fi->g.alpa_list_index++;
2096 if (fi->g.alpa_list_index >= MAX_NODES){
2097 if (fi->g.port_discovery == TRUE) {
2098 fi->g.port_discovery = FALSE;
2099 add_display_cache_timer(fi);
2101 fi->g.alpa_list_index = MAX_NODES;
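/* Timer handler armed ~30 msec after NOS/OLS is seen. If the link has not
 * come back up as a loop or point-to-point by then, retry initialization as
 * an N_Port, or (on Loop Fail) take Tachyon offline, reset it and arm the
 * loop_timer for another look.
 */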
2106 static void nos_ols_timer(unsigned long data)
2108 struct fc_info *fi = (struct fc_info*)data;
2109 u_int fm_status;
2110 fm_status = readl(fi->t_r.ptr_fm_status_reg);
2111 DPRINTK1("FM_status in timer= %x", fm_status);
2112 fi->g.nport_timer_set = FALSE;
2113 del_timer(&fi->nport_timer);
2114 if ((fi->g.ptp_up == TRUE) || (fi->g.loop_up == TRUE))
2115 return;
2116 if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_ACTIVE) || ((fm_status & 0x0F) == PORT_STATE_OFFLINE))) {
2117 DPRINTK1("In OLD-PORT after E_D_TOV.");
2118 take_tachyon_offline(fi);
2119 /* write R_T_TOV & E_D_TOV into the registers */
2120 writel(PTP_TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
2121 writel(BB_CREDIT | NPORT, fi->t_r.ptr_fm_config_reg);
2122 fi->g.n_port_try = TRUE;
2123 DPRINTK1("In timer, TACHYON initializing as N_Port...\n");
2124 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
2126 else
2127 if ((fi->g.lport_timer_set == FALSE) && ((fm_status & 0xF0) == LOOP_FAIL)) {
2128 DPRINTK1("Loop Fail after E_D_TOV.");
2129 fi->lport_timer.function = loop_timer;
2130 fi->lport_timer.data = (unsigned long)fi;
2131 fi->lport_timer.expires = RUN_AT((8*HZ)/100);
2132 init_timer(&fi->lport_timer);
2133 add_timer(&fi->lport_timer);
2134 fi->g.lport_timer_set = TRUE;
2135 take_tachyon_offline(fi);
2136 reset_tachyon(fi, SOFTWARE_RESET);
2138 else
2139 if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_LF1) || ((fm_status & 0x0F) == PORT_STATE_LF2))) {
2140 DPRINTK1("Link Fail-II in OLD-PORT.");
2141 take_tachyon_offline(fi);
2142 reset_tachyon(fi, SOFTWARE_RESET);
2146 static void loop_timer(unsigned long data)
2148 struct fc_info *fi = (struct fc_info*)data;
2149 fi->g.lport_timer_set = FALSE;
2150 del_timer(&fi->lport_timer);
2151 if ((fi->g.ptp_up == TRUE) || (fi->g.loop_up == TRUE))
2152 return;
2155 static void add_display_cache_timer(struct fc_info *fi)
2157 fi->display_cache_timer.function = display_cache_timer;
2158 fi->display_cache_timer.data = (unsigned long)fi;
2159 fi->display_cache_timer.expires = RUN_AT(fi->num_nodes * HZ);
2160 init_timer(&fi->display_cache_timer);
2161 add_timer(&fi->display_cache_timer);
2164 static void display_cache_timer(unsigned long data)
2166 struct fc_info *fi = (struct fc_info*)data;
2167 del_timer(&fi->display_cache_timer);
2168 display_cache(fi);
2169 return;
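/* Write a reset/release value into the Tachyon control register. For
 * everything except a software reset, poll the status register until the
 * OCQ-reset/SCSI-freeze bits clear; a software reset is followed instead by
 * reprogramming the Tachyon registers.
 */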
2172 static void reset_tachyon(struct fc_info *fi, u_int value)
2174 u_int tachyon_status, reset_done = OCQ_RESET_STATUS | SCSI_FREEZE_STATUS;
2175 int not_done = 1, i = 0;
2176 writel(value, fi->t_r.ptr_tach_control_reg);
2177 if (value == OCQ_RESET)
2178 fi->q.ocq_prod_indx = 0;
2179 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
2181 /* Software resets are immediately done, whereas others aren't. It takes
2182 about 30 clocks to do the reset. */
2183 if (value != SOFTWARE_RESET) {
2184 while(not_done) {
2185 if (i++ > 100000) {
2186 T_MSG("Reset was unsuccessful! Tachyon Status = %x", tachyon_status);
2187 break;
2189 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
2190 if ((tachyon_status & reset_done) == 0)
2191 not_done = 0;
2194 else {
2195 write_to_tachyon_registers(fi);
2199 static void take_tachyon_offline(struct fc_info *fi)
2201 u_int fm_status = readl(fi->t_r.ptr_fm_status_reg);
2203 /* The first two conditions will never be true. The Manual and
2204 * the errata say this. But the current implementation is
2205 * decently stable. */
2207 //if ((fm_status & 0xF0) == LOOP_FAIL) {
2208 if (fm_status == LOOP_FAIL) {
2209 // workaround as in P. 89
2210 writel(HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
2211 if (fi->g.loop_up == TRUE)
2212 writel(SOFTWARE_RESET, fi->t_r.ptr_tach_control_reg);
2213 else {
2214 writel(OFFLINE, fi->t_r.ptr_fm_control_reg);
2215 writel(EXIT_HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
2218 else
2219 //if ((fm_status & LOOP_UP) == LOOP_UP) {
2220 if (fm_status == LOOP_UP) {
2221 writel(SOFTWARE_RESET, fi->t_r.ptr_tach_control_reg);
2223 else
2224 writel(OFFLINE, fi->t_r.ptr_fm_control_reg);
2228 static void read_novram(struct fc_info *fi)
2230 int off = 0;
2231 fi->n_r.ptr_novram_hw_control_reg = fi->i_r.ptr_ichip_hw_control_reg;
2232 fi->n_r.ptr_novram_hw_status_reg = fi->i_r.ptr_ichip_hw_status_reg;
2233 iph5526_nr_do_init(fi);
2234 if (fi->clone_id == PCI_VENDOR_ID_INTERPHASE)
2235 off = 32;
2237 fi->g.my_node_name_high = (fi->n_r.data[off] << 16) | fi->n_r.data[off+1];
2238 fi->g.my_node_name_low = (fi->n_r.data[off+2] << 16) | fi->n_r.data[off+3];
2239 fi->g.my_port_name_high = (fi->n_r.data[off+4] << 16) | fi->n_r.data[off+5];
2240 fi->g.my_port_name_low = (fi->n_r.data[off+6] << 16) | fi->n_r.data[off+7];
2241 DPRINTK("node_name = %x %x", fi->g.my_node_name_high, fi->g.my_node_name_low);
2242 DPRINTK("port_name = %x %x", fi->g.my_port_name_high, fi->g.my_port_name_low);
2245 static void reset_ichip(struct fc_info *fi)
2247 /* (i)chip reset */
2248 writel(ICHIP_HCR_RESET, fi->i_r.ptr_ichip_hw_control_reg);
2249 /*wait for chip to get reset */
2250 udelay(10000);
2251 /*de-assert reset */
2252 writel(ICHIP_HCR_DERESET, fi->i_r.ptr_ichip_hw_control_reg);
2254 /* enable INT lines on the (i)chip */
2255 writel(ICHIP_HCR_ENABLE_INTA , fi->i_r.ptr_ichip_hw_control_reg);
2256 /* enable byte swap */
2257 writel(ICHIP_HAMR_BYTE_SWAP_ADDR_TR, fi->i_r.ptr_ichip_hw_addr_mask_reg);
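/* Build and transmit a PLOGI/FLOGI login frame as a new exchange. The
 * completion interrupt is suppressed when the login is addressed to our own
 * AL_PA, since that would disturb port discovery.
 */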
2260 static void tx_logi(struct fc_info *fi, u_int logi, u_int d_id)
2262 int int_required = 1;
2263 u_short ox_id = OX_ID_FIRST_SEQUENCE;
2264 u_int r_ctl = RCTL_ELS_UCTL;
2265 u_int type = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2266 u_int my_mtu = fi->g.my_mtu;
2267 ENTER("tx_logi");
2268 /* We don't want to be interrupted for our own logi.
2269 * It screws up the port discovery process. */
2271 if (d_id == fi->g.my_id)
2272 int_required = 0;
2273 fill_login_frame(fi, logi);
2274 fi->g.type_of_frame = FC_ELS;
2275 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.login, sizeof(LOGIN));
2276 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),sizeof(LOGIN), r_ctl, type, d_id, my_mtu, int_required, ox_id, logi);
2277 fi->g.e_i++;
2278 if (fi->g.e_i == MAX_PENDING_FRAMES)
2279 fi->g.e_i = 0;
2280 LEAVE("tx_logi");
2281 return;
2284 static void tx_logi_acc(struct fc_info *fi, u_int logi, u_int d_id, u_short received_ox_id)
2286 int int_required = 0;
2287 u_int r_ctl = RCTL_ELS_SCTL;
2288 u_int type = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2289 u_int my_mtu = fi->g.my_mtu;
2290 ENTER("tx_logi_acc");
2291 fill_login_frame(fi, logi);
2292 fi->g.type_of_frame = FC_ELS;
2293 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.login, sizeof(LOGIN));
2294 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),sizeof(LOGIN), r_ctl, type, d_id, my_mtu, int_required, received_ox_id, logi);
2295 fi->g.e_i++;
2296 if (fi->g.e_i == MAX_PENDING_FRAMES)
2297 fi->g.e_i = 0;
2298 LEAVE("tx_logi_acc");
2299 return;
2302 static void tx_prli(struct fc_info *fi, u_int command_code, u_int d_id, u_short received_ox_id)
2304 int int_required = 1;
2305 u_int r_ctl = RCTL_ELS_UCTL;
2306 u_int type = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2307 u_int my_mtu = fi->g.my_mtu;
2308 ENTER("tx_prli");
2309 if (command_code == ELS_PRLI)
2310 fi->g.prli.cmnd_code = htons((ELS_PRLI | PAGE_LEN) >> 16);
2311 else {
2312 fi->g.prli.cmnd_code = htons((ELS_ACC | PAGE_LEN) >> 16);
2313 int_required = 0;
2314 type = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2315 r_ctl = RCTL_ELS_SCTL;
2317 fi->g.prli.payload_length = htons(PRLI_LEN);
2318 fi->g.prli.type_code = htons(FCP_TYPE_CODE);
2319 fi->g.prli.est_image_pair = htons(IMAGE_PAIR);
2320 fi->g.prli.responder_pa = 0;
2321 fi->g.prli.originator_pa = 0;
2322 fi->g.prli.service_params = htonl(INITIATOR_FUNC | READ_XFER_RDY_DISABLED);
2323 fi->g.type_of_frame = FC_ELS;
2324 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.prli, sizeof(PRLI));
2325 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]), sizeof(PRLI), r_ctl, type, d_id, my_mtu, int_required, received_ox_id, command_code);
2326 fi->g.e_i++;
2327 if (fi->g.e_i == MAX_PENDING_FRAMES)
2328 fi->g.e_i = 0;
2329 LEAVE("tx_prli");
2330 return;
2333 static void tx_logo(struct fc_info *fi, u_int d_id, u_short received_ox_id)
2335 int int_required = 1;
2336 u_int r_ctl = RCTL_ELS_UCTL;
2337 u_int type = TYPE_ELS | EXCHANGE_RESPONDER | SEQUENCE_RESPONDER | FIRST_SEQUENCE | END_SEQUENCE | SEQUENCE_INITIATIVE;
2338 int size = sizeof(LOGO);
2339 char fc_id[3];
2340 u_int my_mtu = fi->g.my_mtu;
2341 ENTER("tx_logo");
2342 fi->g.logo.logo_cmnd = htonl(ELS_LOGO);
2343 fi->g.logo.reserved = 0;
2344 memcpy(fc_id, &(fi->g.my_id), 3);
2345 fi->g.logo.n_port_id_0 = fc_id[0];
2346 fi->g.logo.n_port_id_1 = fc_id[1];
2347 fi->g.logo.n_port_id_2 = fc_id[2];
2348 fi->g.logo.port_name_up = htonl(N_PORT_NAME_HIGH);
2349 fi->g.logo.port_name_low = htonl(N_PORT_NAME_LOW);
2350 fi->g.type_of_frame = FC_ELS;
2351 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.logo, sizeof(LOGO));
2352 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, ELS_LOGO);
2353 fi->g.e_i++;
2354 if (fi->g.e_i == MAX_PENDING_FRAMES)
2355 fi->g.e_i = 0;
2356 LEAVE("tx_logo");
2359 static void tx_adisc(struct fc_info *fi, u_int cmnd_code, u_int d_id, u_short received_ox_id)
2361 int int_required = 0;
2362 u_int r_ctl = RCTL_ELS_SCTL;
2363 u_int type = TYPE_ELS | EXCHANGE_RESPONDER | SEQUENCE_RESPONDER | FIRST_SEQUENCE | END_SEQUENCE;
2364 int size = sizeof(ADISC);
2365 u_int my_mtu = fi->g.my_mtu;
2366 fi->g.adisc.ls_cmnd_code = htonl(cmnd_code);
2367 fi->g.adisc.hard_address = htonl(0);
2368 fi->g.adisc.port_name_high = htonl(N_PORT_NAME_HIGH);
2369 fi->g.adisc.port_name_low = htonl(N_PORT_NAME_LOW);
2370 fi->g.adisc.node_name_high = htonl(NODE_NAME_HIGH);
2371 fi->g.adisc.node_name_low = htonl(NODE_NAME_LOW);
2372 fi->g.adisc.n_port_id = htonl(fi->g.my_id);
2373 if (cmnd_code == ELS_ADISC) {
2374 int_required = 1;
2375 r_ctl = RCTL_ELS_UCTL;
2376 type = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2378 fi->g.type_of_frame = FC_ELS;
2379 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.adisc, size);
2380 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, cmnd_code);
2381 fi->g.e_i++;
2382 if (fi->g.e_i == MAX_PENDING_FRAMES)
2383 fi->g.e_i = 0;
2386 static void tx_ls_rjt(struct fc_info *fi, u_int d_id, u_short received_ox_id, u_short reason_code, u_short expln_code)
2388 int int_required = 0;
2389 u_int r_ctl = RCTL_ELS_SCTL;
2390 u_int type = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2391 int size = sizeof(LS_RJT);
2392 u_int my_mtu = fi->g.my_mtu;
2393 ENTER("tx_ls_rjt");
2394 fi->g.ls_rjt.cmnd_code = htonl(ELS_LS_RJT);
2395 fi->g.ls_rjt.reason_code = htonl((reason_code << 16) | expln_code);
2396 fi->g.type_of_frame = FC_ELS;
2397 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.ls_rjt, size);
2398 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, ELS_LS_RJT);
2399 fi->g.e_i++;
2400 if (fi->g.e_i == MAX_PENDING_FRAMES)
2401 fi->g.e_i = 0;
2402 LEAVE("tx_ls_rjt");
2405 static void tx_abts(struct fc_info *fi, u_int d_id, u_short ox_id)
2407 int int_required = 1;
2408 u_int r_ctl = RCTL_BASIC_ABTS;
2409 u_int type = TYPE_BLS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2410 int size = 0;
2411 u_int my_mtu = fi->g.my_mtu;
2412 ENTER("tx_abts");
2413 fi->g.type_of_frame = FC_BLS;
2414 tx_exchange(fi, NULL, size, r_ctl, type, d_id, my_mtu, int_required, ox_id, RCTL_BASIC_ABTS);
2415 LEAVE("tx_abts");
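/* Sanity-check the service parameters of a received PLOGI: Class 3 must be
 * valid, and the receive data field size, payload length and concurrent/open
 * sequence counts must be sensible. Returns 0 if the PLOGI is acceptable,
 * otherwise an (LS_RJT reason << 16 | explanation) code for the reply.
 */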
2418 static u_int plogi_ok(struct fc_info *fi, u_int *buff_addr, int size)
2420 int ret_code = 0;
2421 u_short mtu = ntohl(*(buff_addr + 10)) & 0x00000FFF;
2422 u_short class3 = ntohl(*(buff_addr + 25)) >> 16;
2423 u_short class3_conc_seq = ntohl(*(buff_addr + 27)) >> 16;
2424 u_short open_seq = ntohl(*(buff_addr + 28)) >> 16;
2425 DPRINTK1("mtu = %x class3 = %x conc_seq = %x open_seq = %x", mtu, class3, class3_conc_seq, open_seq);
2426 size -= TACHYON_HEADER_LEN;
2427 if (!(class3 & 0x8000)) {
2428 DPRINTK1("Received PLOGI with class3 = %x", class3);
2429 ret_code = (LOGICAL_ERR << 16) | NO_EXPLN;
2430 return ret_code;
2432 if (mtu < 256) {
2433 DPRINTK1("Received PLOGI with MTU set to %x", mtu);
2434 ret_code = (LOGICAL_ERR << 16) | RECV_FIELD_SIZE;
2435 return ret_code;
2437 if (size != PLOGI_LEN) {
2438 DPRINTK1("Received PLOGI of size %x", size);
2439 ret_code = (LOGICAL_ERR << 16) | INV_PAYLOAD_LEN;
2440 return ret_code;
2442 if (class3_conc_seq == 0) {
2443 DPRINTK1("Received PLOGI with conc_seq == 0");
2444 ret_code = (LOGICAL_ERR << 16) | CONC_SEQ;
2445 return ret_code;
2447 if (open_seq == 0) {
2448 DPRINTK1("Received PLOGI with open_seq == 0");
2449 ret_code = (LOGICAL_ERR << 16) | NO_EXPLN;
2450 return ret_code;
2453 /* Could potentially check for more fields, but might end up
2454 not talking to most of the devices. ;-) */
2455 /* Things that could get checked are:
2456 common_features = 0x8800
2457 total_concurrent_seq = at least 1 */
2459 return ret_code;
2462 static void tx_acc(struct fc_info *fi, u_int d_id, u_short received_ox_id)
2464 int int_required = 0;
2465 u_int r_ctl = RCTL_ELS_SCTL;
2466 u_int type = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
2467 int size = sizeof(ACC);
2468 u_int my_mtu = fi->g.my_mtu;
2469 ENTER("tx_acc");
2470 fi->g.acc.cmnd_code = htonl(ELS_ACC);
2471 fi->g.type_of_frame = FC_ELS;
2472 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.acc, size);
2473 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, ELS_ACC);
2474 fi->g.e_i++;
2475 if (fi->g.e_i == MAX_PENDING_FRAMES)
2476 fi->g.e_i = 0;
2477 LEAVE("tx_acc");
2481 static void tx_name_server_req(struct fc_info *fi, u_int req)
2483 int int_required = 1, i, size = 0;
2484 u_short ox_id = OX_ID_FIRST_SEQUENCE;
2485 u_int type = TYPE_FC_SERVICES | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2486 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_CONTROL;
2487 u_int my_mtu = fi->g.my_mtu, d_id = DIRECTORY_SERVER;
2488 CT_HDR ct_hdr;
2489 ENTER("tx_name_server_req");
2490 /* Fill up CT_Header */
2491 ct_hdr.rev_in_id = htonl(FC_CT_REV);
2492 ct_hdr.fs_type = DIRECTORY_SERVER_APP;
2493 ct_hdr.fs_subtype = NAME_SERVICE;
2494 ct_hdr.options = 0;
2495 ct_hdr.resv1 = 0;
2496 ct_hdr.cmnd_resp_code = htons(req >> 16);
2497 ct_hdr.max_res_size = 0;
2498 ct_hdr.resv2 = 0;
2499 ct_hdr.reason_code = 0;
2500 ct_hdr.expln_code = 0;
2501 ct_hdr.vendor_unique = 0;
2503 fi->g.type_of_frame = FC_ELS;
2504 switch(req) {
2505 case FCS_RFC_4:
2506 memcpy(&(fi->g.rfc_4.ct_hdr), &ct_hdr, sizeof(CT_HDR));
2507 fi->g.rfc_4.s_id = htonl(fi->g.my_id);
2508 for (i = 0; i < 32; i++)
2509 fi->g.rfc_4.bit_map[i] = 0;
2510 /* We support IP & SCSI */
2511 fi->g.rfc_4.bit_map[2] = 0x01;
2512 fi->g.rfc_4.bit_map[3] = 0x20;
2513 size = sizeof(RFC_4);
2514 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.rfc_4, size);
2515 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, ox_id, req);
2516 break;
2517 case FCS_GP_ID4:
2518 memcpy(&(fi->g.gp_id4.ct_hdr), &ct_hdr, sizeof(CT_HDR));
2519 fi->g.gp_id4.port_type = htonl(PORT_TYPE_NX_PORTS);
2520 size = sizeof(GP_ID4);
2521 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.gp_id4, size);
2522 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, ox_id, req);
2523 break;
2525 fi->g.e_i++;
2526 if (fi->g.e_i == MAX_PENDING_FRAMES)
2527 fi->g.e_i = 0;
2528 LEAVE("tx_name_server_req");
2531 static void tx_scr(struct fc_info *fi)
2533 int int_required = 1, size = sizeof(SCR);
2534 u_short ox_id = OX_ID_FIRST_SEQUENCE;
2535 u_int type = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
2536 u_int r_ctl = RCTL_ELS_UCTL;
2537 u_int my_mtu = fi->g.my_mtu, d_id = FABRIC_CONTROLLER;
2538 ENTER("tx_scr");
2539 fi->g.scr.cmnd_code = htonl(ELS_SCR);
2540 fi->g.scr.reg_function = htonl(FULL_REGISTRATION);
2541 fi->g.type_of_frame = FC_ELS;
2542 memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.scr, size);
2543 tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]),size, r_ctl, type, d_id, my_mtu, int_required, ox_id, ELS_SCR);
2544 fi->g.e_i++;
2545 if (fi->g.e_i == MAX_PENDING_FRAMES)
2546 fi->g.e_i = 0;
2547 LEAVE("tx_scr");
2550 static void perform_adisc(struct fc_info *fi)
2552 int count = 0;
2553 /* Will be set to TRUE when the timer expires in a PLDA environment. */
2555 fi->g.port_discovery = FALSE;
2557 if (fi->node_info_list) {
2558 struct fc_node_info *temp_list = fi->node_info_list;
2559 while(temp_list) {
2560 /* Tx ADISC to all non-fabric based
2561 * entities. */
2563 if ((temp_list->d_id & 0xFF0000) != 0xFF0000)
2564 tx_adisc(fi, ELS_ADISC, temp_list->d_id, OX_ID_FIRST_SEQUENCE);
2565 temp_list = temp_list->next;
2566 udelay(20);
2567 count++;
2570 /* Perform Port Discovery after timer expires.
2571 * We are giving time for the ADISCed nodes to respond
2572 * so that we don't have to perform PLOGI to those whose
2573 * logins are _still_ valid. */
2575 fi->explore_timer.function = port_discovery_timer;
2576 fi->explore_timer.data = (unsigned long)fi;
2577 fi->explore_timer.expires = RUN_AT((count*3*HZ)/100);
2578 init_timer(&fi->explore_timer);
2579 add_timer(&fi->explore_timer);
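/* Walk the accepted GP_ID4 Name Server response: PLOGI every D_ID we have
 * never seen and ADISC those we were logged in with before, then start the
 * explore timer (about 30 msec per device) before the SCSI inquiries go out.
 */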
2582 static void explore_fabric(struct fc_info *fi, u_int *buff_addr)
2584 u_int *addr = buff_addr + 12; /* index into payload */
2585 u_char control_code;
2586 u_int d_id;
2587 int count = 0;
2588 ENTER("explore_fabric");
2589 DPRINTK1("entering explore_fabric");
2591 /*fi->g.perform_adisc = TRUE;
2592 fi->g.explore_fabric = TRUE;
2593 perform_adisc(fi);*/
2595 do {
2596 d_id = ntohl(*addr) & 0x00FFFFFF;
2597 if (d_id != fi->g.my_id) {
2598 if (sid_logged_in(fi, d_id) == NODE_NOT_PRESENT)
2599 tx_logi(fi, ELS_PLOGI, d_id);
2600 else
2601 if (sid_logged_in(fi, d_id) == NODE_LOGGED_OUT)
2602 tx_adisc(fi, ELS_ADISC, d_id, OX_ID_FIRST_SEQUENCE);
2603 count++;
2605 control_code = (ntohl(*addr) & 0xFF000000) >> 24;
2606 addr++;
2607 DPRINTK1("cc = %x, d_id = %x", control_code, d_id);
2608 } while (control_code != 0x80);
2610 fi->explore_timer.function = fabric_explore_timer;
2611 fi->explore_timer.data = (unsigned long)fi;
2612 /* We give 30 msec for each device to respond and then send out
2613 * our SCSI enquiries. */
2615 fi->explore_timer.expires = RUN_AT((count*3*HZ)/100);
2616 init_timer(&fi->explore_timer);
2617 add_timer(&fi->explore_timer);
2619 DPRINTK1("leaving explore_fabric");
2620 LEAVE("explore_fabric");
2623 static void fabric_explore_timer(unsigned long data)
2625 struct fc_info *fi = (struct fc_info*)data;
2626 del_timer(&fi->explore_timer);
2628 if ((fi->g.loop_up == TRUE) && (fi->g.ptp_up == FALSE)) {
2629 /* Initiate Local Port Discovery on the Local Loop. */
2631 fi->g.port_discovery = TRUE;
2632 fi->g.alpa_list_index = 1;
2633 local_port_discovery(fi);
2635 fi->g.explore_fabric = FALSE;
2636 return;
2639 static void port_discovery_timer(unsigned long data)
2641 struct fc_info *fi = (struct fc_info*)data;
2642 del_timer(&fi->explore_timer);
2644 if ((fi->g.loop_up == TRUE) && (fi->g.explore_fabric != TRUE)) {
2645 fi->g.port_discovery = TRUE;
2646 fi->g.alpa_list_index = 1;
2647 local_port_discovery(fi);
2649 fi->g.perform_adisc = FALSE;
2650 return;
2653 static void add_to_ox_id_list(struct fc_info *fi, u_int transaction_id, u_int cmnd_code)
2655 struct ox_id_els_map *p, *q = fi->ox_id_list, *r = NULL;
2656 int size = sizeof(struct ox_id_els_map);
2657 while (q != NULL) {
2658 r = q;
2659 q = q->next;
2661 p = (struct ox_id_els_map *)kmalloc(size, GFP_ATOMIC);
2662 if (p == NULL) {
2663 T_MSG("kmalloc failed in add_to_ox_id_list()");
2664 return;
2666 p->ox_id = transaction_id;
2667 p->els = cmnd_code;
2668 p->next = NULL;
2669 if (fi->ox_id_list == NULL)
2670 fi->ox_id_list = p;
2671 else
2672 r->next = p;
2673 return;
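/* Look up the OX_ID -> ELS command mapping recorded at transmit time, unlink
 * and free the entry, and return the ELS code so that an incoming ACC or
 * LS_RJT can be matched with the request it answers.
 */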
2676 static u_int remove_from_ox_id_list(struct fc_info *fi, u_short received_ox_id)
2678 struct ox_id_els_map *p = fi->ox_id_list, *q = fi->ox_id_list;
2679 u_int els_type;
2680 while (q != NULL) {
2681 if (q->ox_id == received_ox_id) {
2683 if (q == fi->ox_id_list)
2684 fi->ox_id_list = fi->ox_id_list->next;
2685 else
2686 if (q->next == NULL)
2687 p->next = NULL;
2688 else
2689 p->next = q->next;
2691 els_type = q->els;
2692 kfree(q);
2693 return els_type;
2695 p = q;
2696 q = q->next;
2698 if (q == NULL)
2699 DPRINTK2("Could not find ox_id %x in ox_id_els_map", received_ox_id);
2700 return 0;
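/* Fill in the Tachyon frame header for the next outbound sequence: SOF/EOF,
 * destination AL_PA (0 when the frame has to go through the Fabric), R_CTL,
 * D_ID, S_ID, TYPE/F_CTL, SEQ_ID, DF_CTL, OX_ID and RX_ID. For IP (LLC/SNAP)
 * frames the network header is appended from the MAC-style addresses passed
 * in 'data', and SEQ_CNT is preset so the OCI interrupt handler can tell
 * broadcast from unicast frames.
 */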
2703 static void build_tachyon_header(struct fc_info *fi, u_int my_id, u_int r_ctl, u_int d_id, u_int type, u_char seq_id, u_char df_ctl, u_short ox_id, u_short rx_id, char *data)
2705 u_char alpa = d_id & 0x0000FF;
2706 u_int dest_ddaa = d_id &0xFFFF00;
2708 ENTER("build_tachyon_header");
2709 DPRINTK("d_id = %x, my_ddaa = %x", d_id, fi->g.my_ddaa);
2710 /* Does it have to go to/thru a Fabric? */
2711 if ((dest_ddaa != 0) && ((d_id == F_PORT) || (fi->g.fabric_present && (dest_ddaa != fi->g.my_ddaa))))
2712 alpa = 0x00;
2713 fi->g.tach_header.resv = 0x00000000;
2714 fi->g.tach_header.sof_and_eof = SOFI3 | EOFN;
2715 fi->g.tach_header.dest_alpa = alpa;
2716 /* Set LCr properly to have enough credit */
2717 if (alpa == REPLICATE)
2718 fi->g.tach_header.lcr_and_time_stamp = htons(0xC00);/* LCr=3 */
2719 else
2720 fi->g.tach_header.lcr_and_time_stamp = 0;
2721 fi->g.tach_header.r_ctl_and_d_id = htonl(r_ctl | d_id);
2722 fi->g.tach_header.vc_id_and_s_id = htonl(my_id);
2723 fi->g.tach_header.type_and_f_cntl = htonl(type);
2724 fi->g.tach_header.seq_id = seq_id;
2725 fi->g.tach_header.df_cntl = df_ctl;
2726 fi->g.tach_header.seq_cnt = 0;
2727 fi->g.tach_header.ox_id = htons(ox_id);
2728 fi->g.tach_header.rx_id = htons(rx_id);
2729 fi->g.tach_header.ro = 0;
2730 if (data) {
2731 /* We use the Seq_Count to keep track of IP frames in the
2732 * OCI_interrupt handler. Initial Seq_Count of IP frames is 1. */
2734 if (fi->g.type_of_frame == FC_BROADCAST)
2735 fi->g.tach_header.seq_cnt = htons(0x1);
2736 else
2737 fi->g.tach_header.seq_cnt = htons(0x2);
2738 fi->g.tach_header.nw_header.d_naa = htons(0x1000);
2739 fi->g.tach_header.nw_header.s_naa = htons(0x1000);
2740 memcpy(&(fi->g.tach_header.nw_header.dest_high), data, 2);
2741 memcpy(&(fi->g.tach_header.nw_header.dest_low), data + 2, 4);
2742 memcpy(&(fi->g.tach_header.nw_header.source_high), data + 6, 2);
2743 memcpy(&(fi->g.tach_header.nw_header.source_low), data + 8, 4);
2745 LEAVE("build_tachyon_header");
2748 static void build_EDB(struct fc_info *fi, char *data, u_short flags, u_short len)
2750 fi->g.edb.buf_addr = ntohl((u_int)virt_to_bus(data));
2751 fi->g.edb.ehf = ntohs(flags);
2752 if (len % 4)
2753 len += (4 - (len % 4));
2754 fi->g.edb.buf_len = ntohs(len);
2757 static void build_ODB(struct fc_info *fi, u_char seq_id, u_int d_id, u_int len, u_int cntl, u_short mtu, u_short ox_id, u_short rx_id, int NW_header, int int_required, u_int frame_class)
2759 fi->g.odb.seq_d_id = htonl(seq_id << 24 | d_id);
2760 fi->g.odb.tot_len = len;
2761 if (NW_header)
2762 fi->g.odb.tot_len += NW_HEADER_LEN;
2763 if (fi->g.odb.tot_len % 4)
2764 fi->g.odb.tot_len += (4 - (fi->g.odb.tot_len % 4));
2765 fi->g.odb.tot_len = htonl(fi->g.odb.tot_len);
2766 switch(int_required) {
2767 case NO_COMP_AND_INT:
2768 fi->g.odb.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | ODB_NO_INT | ODB_NO_COMP | cntl);
2769 break;
2770 case INT_AND_COMP_REQ:
2771 fi->g.odb.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | cntl);
2772 break;
2773 case NO_INT_COMP_REQ:
2774 fi->g.odb.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | ODB_NO_INT | cntl);
2775 break;
2777 fi->g.odb.rx_id = htons(rx_id);
2778 fi->g.odb.cs_enable = 0;
2779 fi->g.odb.cs_seed = htons(1);
2781 fi->g.odb.hdr_addr = htonl(virt_to_bus(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx]));
2782 fi->g.odb.frame_len = htons(mtu);
2784 if (NW_header) {
2785 /* The pointer to the sk_buff is in here. Freed up when the
2786 * OCI_interrupt is received. */
2788 fi->g.odb.trans_id = htonl(frame_class);
2789 fi->g.odb.hdr_len = TACHYON_HEADER_LEN + NW_HEADER_LEN;
2791 else {
2792 /* helps in tracking transmitted OX_IDs */
2793 fi->g.odb.trans_id = htonl((frame_class & 0xFFFF0000) | ox_id);
2794 fi->g.odb.hdr_len = TACHYON_HEADER_LEN;
2796 fi->g.odb.hdr_len = htons(fi->g.odb.hdr_len);
2798 fi->g.odb.edb_addr = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
2801 static void fill_login_frame(struct fc_info *fi, u_int logi)
2803 int i;
2804 fi->g.login.ls_cmnd_code= htonl(logi);
2805 fi->g.login.fc_ph_version = htons(PH_VERSION);
2806 if (fi->g.loop_up)
2807 fi->g.login.buff_to_buff_credit = htons(LOOP_BB_CREDIT);
2808 else
2809 if (fi->g.ptp_up)
2810 fi->g.login.buff_to_buff_credit = htons(PT2PT_BB_CREDIT);
2811 if ((logi != ELS_FLOGI) || (logi == ELS_ACC))
2812 fi->g.login.common_features = htons(PLOGI_C_F);
2813 else
2814 if (logi == ELS_FLOGI)
2815 fi->g.login.common_features = htons(FLOGI_C_F);
2816 fi->g.login.recv_data_field_size = htons(FRAME_SIZE);
2817 fi->g.login.n_port_total_conc_seq = htons(CONCURRENT_SEQUENCES);
2818 fi->g.login.rel_off_by_info_cat = htons(RO_INFO_CATEGORY);
2819 fi->g.login.ED_TOV = htonl(E_D_TOV);
2820 fi->g.login.n_port_name_high = htonl(N_PORT_NAME_HIGH);
2821 fi->g.login.n_port_name_low = htonl(N_PORT_NAME_LOW);
2822 fi->g.login.node_name_high = htonl(NODE_NAME_HIGH);
2823 fi->g.login.node_name_low = htonl(NODE_NAME_LOW);
2825 /* Fill Class 1 parameters */
2826 fi->g.login.c_of_s[0].service_options = htons(0);
2827 fi->g.login.c_of_s[0].initiator_ctl = htons(0);
2828 fi->g.login.c_of_s[0].recipient_ctl = htons(0);
2829 fi->g.login.c_of_s[0].recv_data_field_size = htons(0);
2830 fi->g.login.c_of_s[0].concurrent_sequences = htons(0);
2831 fi->g.login.c_of_s[0].n_port_end_to_end_credit = htons(0);
2832 fi->g.login.c_of_s[0].open_seq_per_exchange = htons(0);
2833 fi->g.login.c_of_s[0].resv = htons(0);
2835 /* Fill Class 2 parameters */
2836 fi->g.login.c_of_s[1].service_options = htons(0);
2837 fi->g.login.c_of_s[1].initiator_ctl = htons(0);
2838 fi->g.login.c_of_s[1].recipient_ctl = htons(0);
2839 fi->g.login.c_of_s[1].recv_data_field_size = htons(0);
2840 fi->g.login.c_of_s[1].concurrent_sequences = htons(0);
2841 fi->g.login.c_of_s[1].n_port_end_to_end_credit = htons(0);
2842 fi->g.login.c_of_s[1].open_seq_per_exchange = htons(0);
2843 fi->g.login.c_of_s[1].resv = htons(0);
2845 /* Fill Class 3 parameters */
2846 if (logi == ELS_FLOGI)
2847 fi->g.login.c_of_s[2].service_options = htons(SERVICE_VALID | SEQUENCE_DELIVERY);
2848 else
2849 fi->g.login.c_of_s[2].service_options = htons(SERVICE_VALID);
2850 fi->g.login.c_of_s[2].initiator_ctl = htons(0);
2851 fi->g.login.c_of_s[2].recipient_ctl = htons(0);
2852 fi->g.login.c_of_s[2].recv_data_field_size = htons(FRAME_SIZE);
2853 fi->g.login.c_of_s[2].concurrent_sequences = htons(CLASS3_CONCURRENT_SEQUENCE);
2854 fi->g.login.c_of_s[2].n_port_end_to_end_credit = htons(0);
2855 fi->g.login.c_of_s[2].open_seq_per_exchange = htons(CLASS3_OPEN_SEQUENCE);
2856 fi->g.login.c_of_s[2].resv = htons(0);
2858 for(i = 0; i < 4; i++) {
2859 fi->g.login.resv[i] = 0;
2860 fi->g.login.vendor_version_level[i] = 0;
2865 /* clear the Interrupt Latch on the (i)chip, so that you can receive
2866 * Interrupts from Tachyon in the future. */
2868 static void reset_latch(struct fc_info *fi)
2870 writel(readl(fi->i_r.ptr_ichip_hw_status_reg) | ICHIP_HSR_INT_LATCH, fi->i_r.ptr_ichip_hw_status_reg);
2873 static void update_OCQ_indx(struct fc_info *fi)
2875 fi->q.ocq_prod_indx++;
2876 if (fi->q.ocq_prod_indx == OCQ_LENGTH)
2877 fi->q.ocq_prod_indx = 0;
2878 writel(fi->q.ocq_prod_indx, fi->t_r.ptr_ocq_prod_indx_reg);
2881 static void update_IMQ_indx(struct fc_info *fi, int count)
2883 fi->q.imq_cons_indx += count;
2884 if (fi->q.imq_cons_indx >= IMQ_LENGTH)
2885 fi->q.imq_cons_indx -= IMQ_LENGTH;
2886 writel(fi->q.imq_cons_indx, fi->t_r.ptr_imq_cons_indx_reg);
2889 static void update_SFSBQ_indx(struct fc_info *fi)
2891 fi->q.sfsbq_prod_indx++;
2892 if (fi->q.sfsbq_prod_indx == SFSBQ_LENGTH)
2893 fi->q.sfsbq_prod_indx = 0;
2894 writel(fi->q.sfsbq_prod_indx, fi->t_r.ptr_sfsbq_prod_reg);
2897 static void update_MFSBQ_indx(struct fc_info *fi, int count)
2899 fi->q.mfsbq_prod_indx += count;
2900 if (fi->q.mfsbq_prod_indx >= MFSBQ_LENGTH)
2901 fi->q.mfsbq_prod_indx -= MFSBQ_LENGTH;
2902 writel(fi->q.mfsbq_prod_indx, fi->t_r.ptr_mfsbq_prod_reg);
2906 static void update_tachyon_header_indx(struct fc_info *fi)
2908 fi->q.tachyon_header_indx++;
2909 if (fi->q.tachyon_header_indx == NO_OF_TACH_HEADERS)
2910 fi->q.tachyon_header_indx = 0;
2913 static void update_EDB_indx(struct fc_info *fi)
2915 fi->q.edb_buffer_indx++;
2916 if (fi->q.edb_buffer_indx == EDB_LEN)
2917 fi->q.edb_buffer_indx = 0;
2920 static int iph5526_open(struct net_device *dev)
2922 netif_start_queue(dev);
2923 MOD_INC_USE_COUNT;
2924 return 0;
2927 static int iph5526_close(struct net_device *dev)
2929 netif_stop_queue(dev);
2930 MOD_DEC_USE_COUNT;
2931 return 0;
2934 static void iph5526_timeout(struct net_device *dev)
2936 struct fc_info *fi = (struct fc_info*)dev->priv;
2937 printk(KERN_WARNING "%s: timed out on send.\n", dev->name);
2938 fi->fc_stats.rx_dropped++;
2939 dev->trans_start = jiffies;
2940 netif_wake_queue(dev);
2943 static int iph5526_send_packet(struct sk_buff *skb, struct net_device *dev)
2945 struct fc_info *fi = (struct fc_info*)dev->priv;
2946 int status = 0;
2947 short type = 0;
2948 u_long flags;
2949 struct fcllc *fcllc;
2951 ENTER("iph5526_send_packet");
2953 netif_stop_queue(dev);
2954 /* Strip off the pseudo header. */
2956 skb->data = skb->data + 2*FC_ALEN;
2957 skb->len = skb->len - 2*FC_ALEN;
2958 fcllc = (struct fcllc *)skb->data;
2959 type = ntohs(fcllc->ethertype);
2961 spin_lock_irqsave(&fi->fc_lock, flags);
2962 switch(type) {
2963 case ETH_P_IP:
2964 status = tx_ip_packet(skb, skb->len, fi);
2965 break;
2966 case ETH_P_ARP:
2967 status = tx_arp_packet(skb->data, skb->len, fi);
2968 break;
2969 default:
2970 T_MSG("WARNING!!! Received Unknown Packet Type... Discarding...");
2971 fi->fc_stats.rx_dropped++;
2972 break;
2974 spin_unlock_irqrestore(&fi->fc_lock, flags);
2976 if (status) {
2977 fi->fc_stats.tx_bytes += skb->len;
2978 fi->fc_stats.tx_packets++;
2980 else
2981 fi->fc_stats.rx_dropped++;
2982 dev->trans_start = jiffies;
2983 /* We free up the IP buffers in the OCI_interrupt handler.
2984 * status == 0 implies that the frame was not transmitted. So the
2985 * skb is freed here. */
2987 if ((type == ETH_P_ARP) || (status == 0))
2988 dev_kfree_skb(skb);
2989 else
2990 netif_wake_queue(dev);
2991 LEAVE("iph5526_send_packet");
2992 return 0;
2995 static int iph5526_change_mtu(struct net_device *dev, int mtu)
2997 return 0;
3000 static int tx_ip_packet(struct sk_buff *skb, unsigned long len, struct fc_info *fi)
3002 u_int d_id;
3003 int int_required = 1;
3004 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_DATA;
3005 u_int type = TYPE_LLC_SNAP;
3006 u_short ox_id = OX_ID_FIRST_SEQUENCE;
3007 u_int mtu;
3008 struct fc_node_info *q;
3010 ENTER("tx_ip_packet");
3011 q = look_up_cache(fi, skb->data - 2*FC_ALEN);
3012 if (q != NULL) {
3013 d_id = q->d_id;
3014 DPRINTK("Look-Up Cache Succeeded for d_id = %x", d_id);
3015 mtu = q->mtu;
3016 if (q->login == LOGIN_COMPLETED){
3017 fi->g.type_of_frame = FC_IP;
3018 return tx_exchange(fi, skb->data, len, r_ctl, type, d_id, mtu, int_required, ox_id, virt_to_bus(skb));
3021 if (q->d_id == BROADCAST) {
3022 struct fc_node_info *p = fi->node_info_list;
3023 int return_value = FALSE;
3024 fi->g.type_of_frame = FC_BROADCAST;
3025 /* Do unicast to local nodes. */
3027 int_required = 0;
3028 while(p != NULL) {
3029 d_id = p->d_id;
3030 if ((d_id & 0xFFFF00) == fi->g.my_ddaa)
3031 return_value |= tx_exchange(fi, skb->data, len, r_ctl, type, d_id, fi->g.my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3032 p = p->next;
3034 kfree(q);
3035 return return_value;
3038 if (q->login != LOGIN_COMPLETED) {
3039 DPRINTK1("Node not logged in... Txing PLOGI to %x", d_id);
3040 /* FIXME: we are dumping the frame here */
3041 tx_logi(fi, ELS_PLOGI, d_id);
3044 DPRINTK2("Look-Up Cache Failed");
3045 LEAVE("tx_ip_packet");
3046 return 0;
3049 static int tx_arp_packet(char *data, unsigned long len, struct fc_info *fi)
3051 u_int opcode = data[ARP_OPCODE_0];
3052 u_int d_id;
3053 int int_required = 0, return_value = FALSE;
3054 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_DATA;
3055 u_int type = TYPE_LLC_SNAP;
3056 u_short ox_id = OX_ID_FIRST_SEQUENCE;
3057 u_int my_mtu = fi->g.my_mtu;
3058 ENTER("tx_arp_packet");
3060 opcode = opcode << 8 | data[ARP_OPCODE_1];
3061 fi->g.type_of_frame = FC_IP;
3063 if (opcode == ARPOP_REQUEST) {
3064 struct fc_node_info *q = fi->node_info_list;
3065 d_id = BROADCAST;
3066 return_value |= tx_exchange(fi, data, len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3067 /* Some devices support HW_TYPE 0x01 */
3068 memcpy(fi->g.arp_buffer, data - 2*FC_ALEN, len + 2*FC_ALEN);
3069 fi->g.arp_buffer[9 + 2*FC_ALEN] = 0x01;
3070 return_value |= tx_exchange(fi, (char *)(fi->g.arp_buffer + 2*FC_ALEN), len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3072 /* Do unicast to local nodes. */
3074 while(q != NULL) {
3075 fi->g.type_of_frame = FC_BROADCAST;
3076 d_id = q->d_id;
3077 if ((d_id & 0xFFFF00) == fi->g.my_ddaa) {
3078 return_value |= tx_exchange(fi, data, len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3079 // Some devices support HW_TYPE 0x01
3080 memcpy(fi->g.arp_buffer, data - 2*FC_ALEN, len + 2*FC_ALEN);
3081 fi->g.arp_buffer[9 + 2*FC_ALEN] = 0x01;
3082 return_value |= tx_exchange(fi, (char *)(fi->g.arp_buffer + 2*FC_ALEN), len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3084 q = q->next;
3086 return return_value;
3088 else
3089 if (opcode == ARPOP_REPLY) {
3090 struct fc_node_info *q; u_int mtu;
3091 DPRINTK("We are sending out an ARP reply");
3092 q = look_up_cache(fi, data - 2*FC_ALEN);
3093 if (q != NULL) {
3094 d_id = q->d_id;
3095 DPRINTK("Look-Up Cache Succeeded for d_id = %x", d_id);
3096 mtu = q->mtu;
3097 if (q->login == LOGIN_COMPLETED){
3098 tx_exchange(fi, data, len, r_ctl, type, d_id, mtu, int_required, ox_id, TYPE_LLC_SNAP);
3099 /* Some devices support HW_TYPE 0x01 */
3100 memcpy(fi->g.arp_buffer, data - 2*FC_ALEN, len + 2*FC_ALEN);
3101 fi->g.arp_buffer[9 + 2*FC_ALEN] = 0x01;
3102 return tx_exchange(fi, (char *)(fi->g.arp_buffer + 2*FC_ALEN), len, r_ctl, type, d_id, my_mtu, int_required, ox_id, TYPE_LLC_SNAP);
3104 else {
3105 DPRINTK1("Node not logged in... Txing PLOGI to %x", d_id);
3106 tx_logi(fi, ELS_PLOGI, d_id); /* FIXME: we are dumping the frame here */
3109 DPRINTK2("Look-Up Cache Failed");
3111 else {
3112 T_MSG("Warning!!! Invalid Opcode in ARP Packet!");
3114 LEAVE("tx_arp_packet");
3115 return 0;
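/* Receive path for single-frame IP/ARP sequences: strip the Tachyon header,
 * rebuild an FC hard header in front of the payload, patch the ARP hardware
 * type and hand the skb up through netif_rx().
 */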
3119 static void rx_net_packet(struct fc_info *fi, u_char *buff_addr, int payload_size)
3121 struct net_device *dev = fi->dev;
3122 struct sk_buff *skb;
3123 u_int skb_size = 0;
3124 struct fch_hdr fch;
3125 ENTER("rx_net_packet");
3126 skb_size = payload_size - TACHYON_HEADER_LEN;
3127 DPRINTK("skb_size = %d", skb_size);
3128 fi->fc_stats.rx_bytes += skb_size - 2;
3129 skb = dev_alloc_skb(skb_size);
3130 if (skb == NULL) {
3131 printk(KERN_NOTICE "%s: In rx_net_packet() Memory squeeze, dropping packet.\n", dev->name);
3132 fi->fc_stats.rx_dropped++;
3133 return;
3135 /* Skip over the Tachyon Frame Header. */
3137 buff_addr += TACHYON_HEADER_LEN;
3139 memcpy(fch.daddr, buff_addr + 2, FC_ALEN);
3140 memcpy(fch.saddr, buff_addr + 10, FC_ALEN);
3141 buff_addr += 2;
3142 memcpy(buff_addr, fch.daddr, FC_ALEN);
3143 memcpy(buff_addr + 6, fch.saddr, FC_ALEN);
3144 skb_reserve(skb, 2);
3145 memcpy(skb_put(skb, skb_size - 2), buff_addr, skb_size - 2);
3146 skb->dev = dev;
3147 skb->protocol = fc_type_trans(skb, dev);
3148 DPRINTK("protocol = %x", skb->protocol);
3150 /* Hmmm... to accept HW Type 0x01 as well... */
3152 if (skb->protocol == ntohs(ETH_P_ARP))
3153 skb->data[1] = 0x06;
3154 netif_rx(skb);
3155 fi->fc_stats.rx_packets++;
3156 LEAVE("rx_net_packet");
3160 static void rx_net_mfs_packet(struct fc_info *fi, struct sk_buff *skb)
3162 struct net_device *dev = fi->dev;
3163 struct fch_hdr fch;
3164 ENTER("rx_net_mfs_packet");
3165 /* Construct your Hard Header */
3166 memcpy(fch.daddr, skb->data + 2, FC_ALEN);
3167 memcpy(fch.saddr, skb->data + 10, FC_ALEN);
3168 skb_pull(skb, 2);
3169 memcpy(skb->data, fch.daddr, FC_ALEN);
3170 memcpy(skb->data + 6, fch.saddr, FC_ALEN);
3171 skb->dev = dev;
3172 skb->protocol = fc_type_trans(skb, dev);
3173 DPRINTK("protocol = %x", skb->protocol);
3174 netif_rx(skb);
3175 LEAVE("rx_net_mfs_packet");
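/* Classify a received frame for the network stack: set the packet type from
 * the destination address, strip the FC hard header, and for IP/ARP strip
 * the LLC/SNAP header as well so the ethertype can be returned directly.
 */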
3178 unsigned short fc_type_trans(struct sk_buff *skb, struct net_device *dev)
3180 struct fch_hdr *fch=(struct fch_hdr *)skb->data;
3181 struct fcllc *fcllc;
3182 skb->mac.raw = skb->data;
3183 fcllc = (struct fcllc *)(skb->data + sizeof(struct fch_hdr) + 2);
3184 skb_pull(skb,sizeof(struct fch_hdr) + 2);
3186 if(*fch->daddr & 1) {
3187 if(!memcmp(fch->daddr,dev->broadcast,FC_ALEN))
3188 skb->pkt_type = PACKET_BROADCAST;
3189 else
3190 skb->pkt_type = PACKET_MULTICAST;
3192 else if(dev->flags & IFF_PROMISC) {
3193 if(memcmp(fch->daddr, dev->dev_addr, FC_ALEN))
3194 skb->pkt_type=PACKET_OTHERHOST;
3197 /* Strip the SNAP header from ARP packets since we don't
3198 * pass them through to the 802.2/SNAP layers. */
3201 if (fcllc->dsap == EXTENDED_SAP &&
3202 (fcllc->ethertype == ntohs(ETH_P_IP) ||
3203 fcllc->ethertype == ntohs(ETH_P_ARP))) {
3204 skb_pull(skb, sizeof(struct fcllc));
3205 return fcllc->ethertype;
3207 return ntohs(ETH_P_802_2);
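/* Common transmit path for ELS, BLS, FC services and IP frames: pick the
 * OX_ID/RX_ID for the frame class, build the Tachyon (and, for LLC/SNAP, the
 * network) header, reserve a free header and EDB slot, and queue the sequence
 * on the outbound queue. The SCSI OX_ID, the generic OX_ID and the sequence
 * ID counters are advanced afterwards.
 */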
3210 static int tx_exchange(struct fc_info *fi, char *data, u_int len, u_int r_ctl, u_int type, u_int d_id, u_int mtu, int int_required, u_short tx_ox_id, u_int frame_class)
3212 u_char df_ctl;
3213 int NW_flag = 0, h_size, return_value;
3214 u_short rx_id = RX_ID_FIRST_SEQUENCE;
3215 u_int tachyon_status;
3216 u_int my_id = fi->g.my_id;
3217 ENTER("tx_exchange");
3219 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
3220 DPRINTK("Tachyon Status = %x len = %d MTU = %d", tachyon_status, len, mtu);
3221 if (tachyon_status & OSM_FROZEN) {
3222 reset_tachyon(fi, ERROR_RELEASE);
3223 reset_tachyon(fi, OCQ_RESET);
3224 DPRINTK("Tachyon Status = %x len = %d MTU = %d", tachyon_status, len, mtu);
3226 if (tx_ox_id == OX_ID_FIRST_SEQUENCE) {
3227 switch(fi->g.type_of_frame) {
3228 case FC_SCSI_READ:
3229 tx_ox_id = fi->g.scsi_oxid | SCSI_READ_BIT;
3230 break;
3231 case FC_SCSI_WRITE:
3232 tx_ox_id = fi->g.scsi_oxid;
3233 break;
3234 default:
3235 tx_ox_id = fi->g.ox_id;
3236 break;
3239 else {
3240 switch(fi->g.type_of_frame) {
3241 case FC_SCSI_READ:
3242 rx_id = fi->g.scsi_oxid | SCSI_READ_BIT;
3243 break;
3244 case FC_SCSI_WRITE:
3245 rx_id = fi->g.scsi_oxid;
3246 break;
3247 case FC_BLS:
3248 rx_id = RX_ID_FIRST_SEQUENCE;
3249 break;
3250 default:
3251 rx_id = fi->g.ox_id;
3252 break;
3256 if (type == TYPE_LLC_SNAP) {
3257 df_ctl = 0x20;
3258 NW_flag = 1;
3259 /* Multi Frame Sequence ? If yes, set RO bit */
3260 if (len > mtu)
3261 type |= RELATIVE_OFF_PRESENT;
3262 build_tachyon_header(fi, my_id, r_ctl, d_id, type, fi->g.seq_id, df_ctl, tx_ox_id, rx_id, data - 2*FC_ALEN);
3264 else {
3265 df_ctl = 0;
3266 /* Multi Frame Sequence ? If yes, set RO bit */
3267 if (len > mtu)
3268 type |= RELATIVE_OFF_PRESENT;
3269 build_tachyon_header(fi, my_id, r_ctl, d_id, type, fi->g.seq_id, df_ctl, tx_ox_id, rx_id, NULL);
3272 /* Get free Tachyon Headers and EDBs */
3273 if (get_free_header(fi) || get_free_EDB(fi))
3274 return 0;
3276 if ((type & 0xFF000000) == TYPE_LLC_SNAP) {
3277 h_size = TACHYON_HEADER_LEN + NW_HEADER_LEN;
3278 memcpy(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx], &(fi->g.tach_header), h_size);
3280 else
3281 memcpy(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx], &(fi->g.tach_header), TACHYON_HEADER_LEN);
3283 return_value = tx_sequence(fi, data, len, mtu, d_id, tx_ox_id, rx_id, fi->g.seq_id, NW_flag, int_required, frame_class);
3285 switch(fi->g.type_of_frame) {
3286 case FC_SCSI_READ:
3287 case FC_SCSI_WRITE:
3288 update_scsi_oxid(fi);
3289 break;
3290 case FC_BLS:
3291 break;
3292 default:
3293 fi->g.ox_id++;
3294 if (fi->g.ox_id == 0xFFFF)
3295 fi->g.ox_id = NOT_SCSI_XID;
3296 break;
3299 if (fi->g.seq_id == MAX_SEQ_ID)
3300 fi->g.seq_id = 0;
3301 else
3302 fi->g.seq_id++;
3303 LEAVE("tx_exchange");
3304 return return_value;
3307 static int tx_sequence(struct fc_info *fi, char *data, u_int len, u_int mtu, u_int d_id, u_short ox_id, u_short rx_id, u_char seq_id, int NW_flag, int int_required, u_int frame_class)
3309 u_int cntl = 0;
3310 int return_value;
3311 ENTER("tx_sequence");
3312 build_EDB(fi, data, EDB_END, len);
3313 memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
3314 build_ODB(fi, seq_id, d_id, len, cntl, mtu, ox_id, rx_id, NW_flag, int_required, frame_class);
3315 memcpy(fi->q.ptr_odb[fi->q.ocq_prod_indx], &(fi->g.odb), sizeof(ODB));
3316 if (fi->g.link_up != TRUE) {
3317 DPRINTK2("Fibre Channel Link not up. Dropping Exchange!");
3318 return_value = FALSE;
3320 else {
3321 /* To be on the safe side, we should check at this
3322 * point whether we are overrunning
3323 * Tachyon. */
3325 update_OCQ_indx(fi);
3326 return_value = TRUE;
3328 update_EDB_indx(fi);
3329 update_tachyon_header_indx(fi);
3330 LEAVE("tx_sequence");
3331 return return_value;
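/* Walk the Tachyon header ring from the current index until a slot is
 * found whose stored OX_ID is not an active SCSI exchange ID.
 * Returns 0 on success; on failure Tachyon is reset and 1 is returned.
 */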
3334 static int get_free_header(struct fc_info *fi)
3336 u_short temp_ox_id;
3337 u_int *tach_header, initial_indx = fi->q.tachyon_header_indx;
3338 /* Check if the header is in use.
3339 * We could have an outstanding command.
3340 * We should find a free slot as we can queue a
3341 * maximum of 32 SCSI commands only.
3343 tach_header = fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx];
3344 temp_ox_id = ntohl(*(tach_header + 6)) >> 16;
3345 /* We care about the SCSI writes only. Those are the wicked ones
3346 * that need an additional set of buffers.
3348 while(temp_ox_id <= MAX_SCSI_XID) {
3349 update_tachyon_header_indx(fi);
3350 if (fi->q.tachyon_header_indx == initial_indx) {
3351 /* Should never happen.
3353 T_MSG("No free Tachyon headers available");
3354 reset_tachyon(fi, SOFTWARE_RESET);
3355 return 1;
3357 tach_header = fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx];
3358 temp_ox_id = ntohl(*(tach_header + 6)) >> 16;
3360 return 0;
3363 static int get_free_EDB(struct fc_info *fi)
3365 unsigned int initial_indx = fi->q.edb_buffer_indx;
3366 /* Check if the EDB is in use.
3367 * We could have an outstanding SCSI Write command.
3368 * We should find a free slot as we can queue a
3369 * maximum of 32 SCSI commands only.
3371 while (fi->q.free_edb_list[fi->q.edb_buffer_indx] != EDB_FREE) {
3372 update_EDB_indx(fi);
3373 if (fi->q.edb_buffer_indx == initial_indx) {
3374 T_MSG("No free EDB buffers available")
3375 reset_tachyon(fi, SOFTWARE_RESET);
3376 return 1;
3379 return 0;
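/* Compare the Port Name, Node Name and S_ID in a received login
 * payload against our node cache. Returns 1 (and marks the login
 * complete) on a match, 0 if the IDs have changed or the node is
 * unknown, in which case the caller transmits a LOGO.
 */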
3382 static int validate_login(struct fc_info *fi, u_int *base_ptr)
3384 struct fc_node_info *q = fi->node_info_list;
3385 char n_port_name[PORT_NAME_LEN];
3386 char node_name[NODE_NAME_LEN];
3387 u_int s_id;
3388 ENTER("validate_login");
3389 /*index to Port Name in the payload. We need the 8 byte Port Name */
3390 memcpy(n_port_name, base_ptr + 10, PORT_NAME_LEN);
3391 memcpy(node_name, base_ptr + 12, NODE_NAME_LEN);
3392 s_id = ntohl(*(base_ptr + 3)) & 0x00FFFFFF;
3394 /* check if Fibre Channel IDs have changed */
3395 while(q != NULL) {
3396 if (memcmp(n_port_name, q->hw_addr, PORT_NAME_LEN) == 0) {
3397 if ((s_id != q->d_id) || (memcmp(node_name, q->node_name, NODE_NAME_LEN) != 0)) {
3398 DPRINTK1("Fibre Channel ID of Node has changed. Txing LOGO.");
3399 return 0;
3401 q->login = LOGIN_COMPLETED;
3402 #if DEBUG_5526_2
3403 display_cache(fi);
3404 #endif
3405 return 1;
3407 q = q->next;
3409 DPRINTK1("Port Name does not match. Txing LOGO.");
3410 LEAVE("validate_login");
3411 return 0;
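/* Pull the Port Name, Node Name, S_ID, MTU and Class-of-Service
 * parameters out of a login payload and either refresh the matching
 * entry in the node cache or append a new fc_node_info to the list.
 */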
3414 static void add_to_address_cache(struct fc_info *fi, u_int *base_ptr)
3416 int size = sizeof(struct fc_node_info);
3417 struct fc_node_info *p, *q = fi->node_info_list, *r = NULL;
3418 char n_port_name[PORT_NAME_LEN];
3419 u_int s_id;
3420 ENTER("add_to_address_cache");
3421 /*index to Port Name in the payload. We need the 8 byte Port Name */
3422 memcpy(n_port_name, base_ptr + 13, PORT_NAME_LEN);
3423 s_id = ntohl(*(base_ptr + 3)) & 0x00FFFFFF;
3425 /* check if info already exists */
3426 while(q != NULL) {
3427 if (memcmp(n_port_name, q->hw_addr, PORT_NAME_LEN) == 0) {
3428 if (s_id != q->d_id) {
3429 memcpy(&(q->c_of_s[0]), base_ptr + 17, 3 * sizeof(CLASS_OF_SERVICE));
3430 q->mtu = ntohl(*(base_ptr + 10)) & 0x00000FFF;
3431 q->d_id = s_id;
3432 memcpy(q->node_name, base_ptr + 15, NODE_NAME_LEN);
3434 q->login = LOGIN_COMPLETED;
3435 q->scsi = FALSE;
3436 fi->num_nodes++;
3437 #if DEBUG_5526_2
3438 display_cache(fi);
3439 #endif
3440 return;
3442 r = q;
3443 q = q->next;
3445 p = (struct fc_node_info *)kmalloc(size, GFP_ATOMIC);
3446 if (p == NULL) {
3447 T_MSG("kmalloc failed in add_to_address_cache()");
3448 return;
3450 memcpy(&(p->c_of_s[0]), base_ptr + 17, 3 * sizeof(CLASS_OF_SERVICE));
3451 p->mtu = ntohl(*(base_ptr + 10)) & 0x00000FFF;
3452 p->d_id = s_id;
3453 memcpy(p->hw_addr, base_ptr + 13, PORT_NAME_LEN);
3454 memcpy(p->node_name, base_ptr + 15, NODE_NAME_LEN);
3455 p->login = LOGIN_COMPLETED;
3456 p->scsi = FALSE;
3457 p->target_id = 0xFF;
3458 p->next = NULL;
3459 if (fi->node_info_list == NULL)
3460 fi->node_info_list = p;
3461 else
3462 r->next = p;
3463 fi->num_nodes++;
3464 #if DEBUG_5526_2
3465 display_cache(fi);
3466 #endif
3467 LEAVE("add_to_address_cache");
3468 return;
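/* Handle ELS_LOGO by marking the node as no longer logged in, and
 * ELS_RSCN by walking the notification pages: single ports go to
 * rscn_handler(), while area/domain notifications re-validate every
 * cached node under the mask and then schedule a delayed SCR via
 * scr_timer().
 */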
3471 static void remove_from_address_cache(struct fc_info *fi, u_int *base_ptr, u_int cmnd_code)
3473 struct fc_node_info *q = fi->node_info_list;
3474 u_int s_id;
3475 ENTER("remove_from_address_cache");
3476 s_id = ntohl(*(base_ptr + 3)) & 0x00FFFFFF;
3477 switch(cmnd_code) {
3478 case ELS_LOGO:
3479 /* check if info exists */
3480 while (q != NULL) {
3481 if (s_id == q->d_id) {
3482 if (q->login == LOGIN_COMPLETED)
3483 q->login = LOGIN_ATTEMPTED;
3484 if (fi->num_nodes > 0)
3485 fi->num_nodes--;
3486 #if DEBUG_5526_2
3487 display_cache(fi);
3488 #endif
3489 return;
3491 q = q->next;
3493 DPRINTK1("ELS_LOGO received from node 0x%x which is not logged-in", s_id);
3494 break;
3495 case ELS_RSCN:
3497 int payload_len = ntohl(*(base_ptr + 8)) & 0xFF;
3498 int no_of_pages, i;
3499 u_char address_format;
3500 u_short received_ox_id = ntohl(*(base_ptr + 6)) >> 16;
3501 u_int node_id, mask, *page_ptr = base_ptr + 9;
3502 if ((payload_len < 4) || (payload_len > 256)) {
3503 DPRINTK1("RSCN with invalid payload length received");
3504 tx_ls_rjt(fi, s_id, received_ox_id, LOGICAL_ERR, RECV_FIELD_SIZE);
3505 return;
3507 /* Page_size includes the Command Code */
3508 no_of_pages = (payload_len / 4) - 1;
3509 for (i = 0; i < no_of_pages; i++) {
3510 address_format = ntohl(*page_ptr) >> 24;
3511 node_id = ntohl(*page_ptr) & 0x00FFFFFF;
3512 switch(address_format) {
3513 case PORT_ADDRESS_FORMAT:
3514 rscn_handler(fi, node_id);
3515 break;
3516 case AREA_ADDRESS_FORMAT:
3517 case DOMAIN_ADDRESS_FORMAT:
3518 if (address_format == AREA_ADDRESS_FORMAT)
3519 mask = 0xFFFF00;
3520 else
3521 mask = 0xFF0000;
3522 while(q != NULL) {
3523 if ((q->d_id & mask) == (node_id & mask))
3524 rscn_handler(fi, q->d_id);
3525 q = q->next;
3527 /* There might be some new nodes to be
3528 * discovered. But some of the earlier
3529 * requests sent as a result of the RSCN might
3530 * still be in progress. We don't want to duplicate
3531 * that effort, so let's call SCR after a lag. */
3533 fi->explore_timer.function = scr_timer;
3534 fi->explore_timer.data = (unsigned long)fi;
3535 fi->explore_timer.expires = RUN_AT((no_of_pages*3*HZ)/100);
3536 init_timer(&fi->explore_timer);
3537 add_timer(&fi->explore_timer);
3538 break;
3539 default:
3540 T_MSG("RSCN with invalid address format received");
3541 tx_ls_rjt(fi, s_id, received_ox_id, LOGICAL_ERR, NO_EXPLN);
3543 page_ptr += 1;
3544 } /* end of for loop */
3545 } /* end of case RSCN: */
3546 break;
3548 #if DEBUG_5526_2
3549 display_cache(fi);
3550 #endif
3551 LEAVE("remove_from_address_cache");
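/* React to a state-change notification for a single port: a logged-in
 * node is marked as needing a fresh login, a known but logged-out node
 * gets an ADISC, and an unknown node gets a full PLOGI.
 */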
3554 static void rscn_handler(struct fc_info *fi, u_int node_id)
3556 struct fc_node_info *q = fi->node_info_list;
3557 int login_state = sid_logged_in(fi, node_id);
3558 if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN)) {
3559 while(q != NULL) {
3560 if (q->d_id == node_id) {
3561 q->login = LOGIN_ATTEMPTED;
3562 if (fi->num_nodes > 0)
3563 fi->num_nodes--;
3564 break;
3566 else
3567 q = q->next;
3570 else
3571 if (login_state == NODE_LOGGED_OUT)
3572 tx_adisc(fi, ELS_ADISC, node_id, OX_ID_FIRST_SEQUENCE);
3573 else
3574 if (login_state == NODE_NOT_PRESENT)
3575 tx_logi(fi, ELS_PLOGI, node_id);
3578 static void scr_timer(unsigned long data)
3580 struct fc_info *fi = (struct fc_info *)data;
3581 del_timer(&fi->explore_timer);
3582 tx_name_server_req(fi, FCS_GP_ID4);
3585 static int sid_logged_in(struct fc_info *fi, u_int s_id)
3587 struct fc_node_info *temp = fi->node_info_list;
3588 while(temp != NULL)
3589 if ((temp->d_id == s_id) && (temp->login == LOGIN_COMPLETED)) {
3590 if (temp->scsi != FALSE)
3591 return NODE_PROCESS_LOGGED_IN;
3592 else
3593 return NODE_LOGGED_IN;
3595 else
3596 if ((temp->d_id == s_id) && (temp->login != LOGIN_COMPLETED))
3597 return NODE_LOGGED_OUT;
3598 else
3599 temp = temp->next;
3600 return NODE_NOT_PRESENT;
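/* Record, from the service parameters of a PRLI payload, whether a
 * logged-in node is a SCSI initiator or target; targets get a
 * persistent target_id on first contact. With action == DELETE_ENTRY
 * the SCSI flag is simply cleared.
 */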
3603 static void mark_scsi_sid(struct fc_info *fi, u_int *buff_addr, u_char action)
3605 struct fc_node_info *temp = fi->node_info_list;
3606 u_int s_id;
3607 u_int service_params;
3608 s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
3609 service_params = ntohl(*(buff_addr + 12)) & 0x000000F0;
3610 while(temp != NULL)
3611 if ((temp->d_id == s_id) && (temp->login == LOGIN_COMPLETED)) {
3612 if (action == DELETE_ENTRY) {
3613 temp->scsi = FALSE;
3614 #if DEBUG_5526_2
3615 display_cache(fi);
3616 #endif
3617 return;
3619 /* Check if it is a SCSI Target */
3620 if (!(service_params & TARGET_FUNC)) {
3621 temp->scsi = INITIATOR;
3622 #if DEBUG_5526_2
3623 display_cache(fi);
3624 #endif
3625 return;
3627 temp->scsi = TARGET;
3628 /* This helps to maintain the target_id no matter what your
3629 * Fibre Channel ID is. */
3631 if (temp->target_id == 0xFF) {
3632 if (fi->g.no_of_targets <= MAX_SCSI_TARGETS)
3633 temp->target_id = fi->g.no_of_targets++;
3634 else
3635 T_MSG("MAX TARGETS reached!");
3637 else
3638 DPRINTK1("Target_id %d already present", temp->target_id);
3639 #if DEBUG_5526_2
3640 display_cache(fi);
3641 #endif
3642 return;
3644 else
3645 temp = temp->next;
3646 return;
3649 static int node_logged_in_prev(struct fc_info *fi, u_int *buff_addr)
3651 struct fc_node_info *temp;
3652 u_char *data = (u_char *)buff_addr;
3653 u_int s_id;
3654 char node_name[NODE_NAME_LEN];
3655 s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
3656 memcpy(node_name, buff_addr + 12, NODE_NAME_LEN);
3657 /* point to port_name in the ADISC payload */
3658 data += 10 * 4;
3659 /* point to last 6 bytes of port_name */
3660 data += 2;
3661 temp = look_up_cache(fi, data);
3662 if (temp != NULL) {
3663 if ((temp->d_id == s_id) && (memcmp(node_name, temp->node_name, NODE_NAME_LEN) == 0)) {
3664 temp->login = LOGIN_COMPLETED;
3665 #if DEBUG_5526_2
3666 display_cache(fi);
3667 #endif
3668 return TRUE;
3671 return FALSE;
3674 static struct fc_node_info *look_up_cache(struct fc_info *fi, char *data)
3676 struct fc_node_info *temp_list = fi->node_info_list, *q;
3677 u_char n_port_name[FC_ALEN], temp_addr[FC_ALEN];
3678 ENTER("look_up_cache");
3679 memcpy(n_port_name, data, FC_ALEN);
3680 while(temp_list) {
3681 if (memcmp(n_port_name, &(temp_list->hw_addr[2]), FC_ALEN) == 0)
3682 return temp_list;
3683 else
3684 temp_list = temp_list->next;
3687 /* Broadcast IP? */
3689 temp_addr[0] = temp_addr[1] = temp_addr[2] = 0xFF;
3690 temp_addr[3] = temp_addr[4] = temp_addr[5] = 0xFF;
3691 if (memcmp(n_port_name, temp_addr, FC_ALEN) == 0) {
3692 q = (struct fc_node_info *)kmalloc(sizeof(struct fc_node_info), GFP_ATOMIC);
3693 if (q == NULL) {
3694 T_MSG("kmalloc failed in look_up_cache()");
3695 return NULL;
3697 q->d_id = BROADCAST;
3698 return q;
3700 LEAVE("look_up_cache");
3701 return NULL;
3704 static int display_cache(struct fc_info *fi)
3706 struct fc_node_info *q = fi->node_info_list;
3707 #if DEBUG_5526_2
3708 struct ox_id_els_map *temp_ox_id_list = fi->ox_id_list;
3709 #endif
3710 int count = 0, j;
3711 printk("\nFibre Channel Node Information for %s\n", fi->name);
3712 printk("My FC_ID = %x, My WWN = %x %x, ", fi->g.my_id, fi->g.my_node_name_high, fi->g.my_node_name_low);
3713 if (fi->g.ptp_up == TRUE)
3714 printk("Port_Type = N_Port\n");
3715 if (fi->g.loop_up == TRUE)
3716 printk("Port_Type = L_Port\n");
3717 while(q != NULL) {
3718 printk("WWN = ");
3719 for (j = 0; j < PORT_NAME_LEN; j++)
3720 printk("%x ", q->hw_addr[j]);
3721 printk("FC_ID = %x, ", q->d_id);
3722 printk("Login = ");
3723 if (q->login == LOGIN_COMPLETED)
3724 printk("ON ");
3725 else
3726 printk("OFF ");
3727 if (q->scsi == TARGET)
3728 printk("Target_ID = %d ", q->target_id);
3729 printk("\n");
3730 q = q->next;
3731 count++;
3734 #if DEBUG_5526_2
3735 printk("OX_ID -> ELS Map\n");
3736 while(temp_ox_id_list) {
3737 printk("ox_id = %x, ELS = %x\n", temp_ox_id_list->ox_id, temp_ox_id_list->els);
3738 temp_ox_id_list = temp_ox_id_list->next;
3740 #endif
3742 return 0;
3745 static struct net_device_stats * iph5526_get_stats(struct net_device *dev)
3747 struct fc_info *fi = (struct fc_info*)dev->priv;
3748 return (struct net_device_stats *) &fi->fc_stats;
3752 /* SCSI stuff starts here */
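/* Scsi_Host_Template detect() entry point: scan the PCI bus for
 * supported Interphase adapters, allocate an fc_info and a Scsi_Host
 * per card, map the board registers, hook the interrupt, initialize
 * Tachyon and wait for the link/login bring-up. Returns the number of
 * hosts registered.
 */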
3754 int iph5526_detect(Scsi_Host_Template *tmpt)
3756 struct Scsi_Host *host = NULL;
3757 struct iph5526_hostdata *hostdata;
3758 struct fc_info *fi = NULL;
3759 int no_of_hosts = 0, timeout, i, j, count = 0;
3760 u_int pci_maddr = 0;
3761 struct pci_dev *pdev = NULL;
3763 tmpt->proc_name = "iph5526";
3764 if (pci_present() == 0) {
3765 printk("iph5526: PCI not present\n");
3766 return 0;
3769 for (i = 0; i <= MAX_FC_CARDS; i++)
3770 fc[i] = NULL;
3772 for (i = 0; clone_list[i].vendor_id != 0; i++)
3773 while ((pdev = pci_find_device(clone_list[i].vendor_id, clone_list[i].device_id, pdev))) {
3774 unsigned short pci_command;
3775 if (pci_enable_device(pdev))
3776 continue;
3777 if (count < MAX_FC_CARDS) {
3778 fc[count] = kmalloc(sizeof(struct fc_info), GFP_ATOMIC);
3779 if (fc[count] == NULL) {
3780 printk("iph5526.c: Unable to register card # %d\n", count + 1);
3781 return no_of_hosts;
3783 memset(fc[count], 0, sizeof(struct fc_info));
3785 else {
3786 printk("iph5526.c: Maximum Number of cards reached.\n");
3787 return no_of_hosts;
3790 fi = fc[count];
3791 sprintf(fi->name, "fc%d", count);
3793 host = scsi_register(tmpt, sizeof(struct iph5526_hostdata));
3794 hostdata = (struct iph5526_hostdata *)host->hostdata;
3795 memset(hostdata, 0 , sizeof(struct iph5526_hostdata));
3796 for (j = 0; j < MAX_SCSI_TARGETS; j++)
3797 hostdata->tag_ages[j] = jiffies;
3798 hostdata->fi = fi;
3799 fi->host = host;
3800 //host->max_id = MAX_SCSI_TARGETS;
3801 host->max_id = 5;
3802 host->hostt->use_new_eh_code = 1;
3803 host->this_id = tmpt->this_id;
3805 pci_maddr = pci_resource_start(pdev, 0);
3806 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
3807 printk("iph5526.c : Cannot find proper PCI device base address.\n");
3808 scsi_unregister(host);
3809 kfree(fc[count]);
3810 fc[count] = NULL;
3811 continue;
3814 DPRINTK("pci_maddr = %x", pci_maddr);
3815 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3817 pci_irq_line = pdev->irq;
3818 printk("iph5526.c: PCI BIOS reports %s at i/o %#x, irq %d.\n", clone_list[i].name, pci_maddr, pci_irq_line);
3819 fi->g.mem_base = ioremap(pci_maddr & PAGE_MASK, 1024);
3821 /* We use Memory Mapped IO. The initial space contains the
3822 * PCI Configuration registers, followed by the (i)chip
3823 * registers, followed by the Tachyon registers. */
3825 /* That's where the (i)chip maps the Tachyon Address Space. */
3827 fi->g.tachyon_base = (u_long)fi->g.mem_base + TACHYON_OFFSET + ( pci_maddr & ~PAGE_MASK );
3828 DPRINTK("fi->g.tachyon_base = %x", (u_int)fi->g.tachyon_base);
3829 if (fi->g.mem_base == NULL) {
3830 printk("iph5526.c : ioremap failed!!!\n");
3831 scsi_unregister(host);
3832 kfree(fc[count]);
3833 fc[count] = NULL;
3834 continue;
3836 DPRINTK("IRQ1 = %d\n", pci_irq_line);
3837 printk(version);
3838 fi->base_addr = (long) pdev;
3840 if (pci_irq_line) {
3841 int irqval = 0;
3842 /* Found it, get IRQ.
3844 irqval = request_irq(pci_irq_line, &tachyon_interrupt, pci_irq_line ? SA_SHIRQ : 0, fi->name, host);
3845 if (irqval) {
3846 printk("iph5526.c : Unable to get IRQ %d (irqval = %d).\n", pci_irq_line, irqval);
3847 scsi_unregister(host);
3848 kfree(fc[count]);
3849 fc[count] = NULL;
3850 continue;
3852 host->irq = fi->irq = pci_irq_line;
3853 pci_irq_line = 0;
3854 fi->clone_id = clone_list[i].vendor_id;
3857 if (!initialize_register_pointers(fi) || !tachyon_init(fi)) {
3858 printk("iph5526.c: TACHYON initialization failed for card # %d!!!\n", count + 1);
3859 free_irq(host->irq, host);
3860 scsi_unregister(host);
3861 if (fi)
3862 clean_up_memory(fi);
3863 kfree(fc[count]);
3864 fc[count] = NULL;
3865 break;
3867 DPRINTK1("Fibre Channel card initialized");
3868 /* Wait for the Link to come up and the login process
3869 * to complete.
3871 for(timeout = jiffies + 10*HZ; (timeout > jiffies) && ((fi->g.link_up == FALSE) || (fi->g.port_discovery == TRUE) || (fi->g.explore_fabric == TRUE) || (fi->g.perform_adisc == TRUE));)
3872 barrier();
3874 count++;
3875 no_of_hosts++;
3877 DPRINTK1("no_of_hosts = %d",no_of_hosts);
3879 /* This is to make sure that the ACC to the PRLI comes in
3880 * for the last ALPA. */
3882 mdelay(1000); /* Ugly! Let the Gods forgive me */
3884 DPRINTK1("leaving iph5526_detect\n");
3885 return no_of_hosts;
3889 int iph5526_biosparam(Disk * disk, kdev_t n, int ip[])
3891 int size = disk->capacity;
3892 ip[0] = 64;
3893 ip[1] = 32;
3894 ip[2] = size >> 11;
3895 if (ip[2] > 1024) {
3896 ip[0] = 255;
3897 ip[1] = 63;
3898 ip[2] = size / (ip[0] * ip[1]);
3900 return 0;
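/* Queue a SCSI command: fill in the FCP_CMND (tag type, LUN, CDB and
 * data length), reserve a SCSI OX_ID and remember the Scsi_Cmnd so
 * that done() can be called on completion, set up the SEST entry via
 * add_to_sest() and transmit the command as an FCP exchange to the
 * resolved target.
 */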
3903 int iph5526_queuecommand(Scsi_Cmnd *Cmnd, void (*done) (Scsi_Cmnd *))
3905 int int_required = 0;
3906 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_COMMAND;
3907 u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
3908 u_int frame_class = Cmnd->target;
3909 u_short ox_id = OX_ID_FIRST_SEQUENCE;
3910 struct Scsi_Host *host = Cmnd->host;
3911 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata*)host->hostdata;
3912 struct fc_info *fi = hostdata->fi;
3913 struct fc_node_info *q;
3914 u_long flags;
3915 ENTER("iph5526_queuecommand");
3917 spin_lock_irqsave(&fi->fc_lock, flags);
3918 Cmnd->scsi_done = done;
3920 if (Cmnd->device->tagged_supported) {
3921 switch(Cmnd->tag) {
3922 case SIMPLE_QUEUE_TAG:
3923 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_SIMPLE;
3924 break;
3925 case HEAD_OF_QUEUE_TAG:
3926 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_HEAD_OF_Q;
3927 break;
3928 case ORDERED_QUEUE_TAG:
3929 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_ORDERED;
3930 break;
3931 default:
3932 if ((jiffies - hostdata->tag_ages[Cmnd->target]) > (5 * HZ)) {
3933 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_ORDERED;
3934 hostdata->tag_ages[Cmnd->target] = jiffies;
3936 else
3937 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_SIMPLE;
3938 break;
3941 /*else
3942 hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_UNTAGGED; */
3945 hostdata->cmnd.fcp_addr[3] = 0;
3946 hostdata->cmnd.fcp_addr[2] = 0;
3947 hostdata->cmnd.fcp_addr[1] = 0;
3948 hostdata->cmnd.fcp_addr[0] = htons(Cmnd->lun);
3950 memcpy(&hostdata->cmnd.fcp_cdb, Cmnd->cmnd, Cmnd->cmd_len);
3951 hostdata->cmnd.fcp_data_len = htonl(Cmnd->request_bufflen);
3953 /* Get an unused OX_ID. We could have pending commands. */
3955 if (get_scsi_oxid(fi)) {
3956 spin_unlock_irqrestore(&fi->fc_lock, flags);
3957 return 1;
3959 fi->q.free_scsi_oxid[fi->g.scsi_oxid] = OXID_INUSE;
3961 /* Maintain a handler so that we can associate the done() function
3962 * on completion of the SCSI command.
3964 hostdata->cmnd_handler[fi->g.scsi_oxid] = Cmnd;
3966 switch(Cmnd->cmnd[0]) {
3967 case WRITE_6:
3968 case WRITE_10:
3969 case WRITE_12:
3970 fi->g.type_of_frame = FC_SCSI_WRITE;
3971 hostdata->cmnd.fcp_cntl = htonl(FCP_CNTL_WRITE | hostdata->cmnd.fcp_cntl);
3972 break;
3973 default:
3974 fi->g.type_of_frame = FC_SCSI_READ;
3975 hostdata->cmnd.fcp_cntl = htonl(FCP_CNTL_READ | hostdata->cmnd.fcp_cntl);
3978 memcpy(fi->q.ptr_fcp_cmnd[fi->q.fcp_cmnd_indx], &(hostdata->cmnd), sizeof(fcp_cmd));
3980 q = resolve_target(fi, Cmnd->target);
3982 if (q == NULL) {
3983 u_int bad_id = fi->g.my_ddaa | 0xFE;
3984 /* We transmit to a non-existent AL_PA so that the "done"
3985 * function can be called while receiving the interrupt
3986 * due to a Timeout for a bad AL_PA. In a PTP configuration,
3987 * the int_required field is set, since there is no notion
3988 * of AL_PAs. This approach sucks, but works alright! */
3990 if (fi->g.ptp_up == TRUE)
3991 int_required = 1;
3992 tx_exchange(fi, (char *)(&(hostdata->cmnd)), sizeof(fcp_cmd), r_ctl, type, bad_id, fi->g.my_mtu, int_required, ox_id, FC_SCSI_BAD_TARGET);
3993 spin_unlock_irqrestore(&fi->fc_lock, flags);
3994 DPRINTK1("Target ID %x not present", Cmnd->target);
3995 return 0;
3997 if (q->login == LOGIN_COMPLETED) {
3998 if (add_to_sest(fi, Cmnd, q)) {
3999 DPRINTK1("add_to_sest() failed.");
4000 spin_unlock_irqrestore(&fi->fc_lock, flags);
4001 return 0;
4003 tx_exchange(fi, (char *)(fi->q.ptr_fcp_cmnd[fi->q.fcp_cmnd_indx]), sizeof(fcp_cmd), r_ctl, type, q->d_id, q->mtu, int_required, ox_id, frame_class << 16);
4004 update_FCP_CMND_indx(fi);
4006 spin_unlock_irqrestore(&fi->fc_lock, flags);
4007 /* If q != NULL, then we have a SCSI Target.
4008 * If q->login != LOGIN_COMPLETED, then that device could be
4009 * offline temporarily, so we let the command time out. */
4011 LEAVE("iph5526_queuecommand");
4012 return 0;
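/* New-style error-handling abort: map the Scsi_Cmnd back to its OX_ID,
 * invalidate the exchange's resources and, if the target is still
 * logged in, transmit an ABTS; otherwise just tear down the SEST entry.
 */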
4015 int iph5526_abort(Scsi_Cmnd *Cmnd)
4017 struct Scsi_Host *host = Cmnd->host;
4018 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
4019 struct fc_info *fi = hostdata->fi;
4020 struct fc_node_info *q;
4021 u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_COMMAND;
4022 u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
4023 u_short ox_id = OX_ID_FIRST_SEQUENCE;
4024 int int_required = 1, i, abort_status = FALSE;
4025 u_long flags;
4027 ENTER("iph5526_abort");
4029 spin_lock_irqsave(&fi->fc_lock, flags);
4031 q = resolve_target(fi, Cmnd->target);
4032 if (q == NULL) {
4033 u_int bad_id = fi->g.my_ddaa | 0xFE;
4034 /* This should not happen as we should always be able to
4035 * resolve a target id. But, just in case...
4036 * We transmit to a non-existent AL_PA so that the done
4037 * function can be called while receiving the interrupt
4038 * for a bad AL_PA. */
4040 DPRINTK1("Unresolved Target ID!");
4041 tx_exchange(fi, (char *)(&(hostdata->cmnd)), sizeof(fcp_cmd), r_ctl, type, bad_id, fi->g.my_mtu, int_required, ox_id, FC_SCSI_BAD_TARGET);
4042 DPRINTK1("Target ID %x not present", Cmnd->target);
4043 spin_unlock_irqrestore(&fi->fc_lock, flags);
4044 return FAILED;
4047 /* If q != NULL, then we have a SCSI Target. If
4048 * q->login != LOGIN_COMPLETED, then that device could
4049 * be offline temporarily, so we let the command time out. */
4052 /* Get the OX_ID for the Command to be aborted.
4054 for (i = 0; i <= MAX_SCSI_XID; i++) {
4055 if (hostdata->cmnd_handler[i] == Cmnd) {
4056 hostdata->cmnd_handler[i] = NULL;
4057 ox_id = i;
4058 break;
4061 if (i > MAX_SCSI_XID) {
4062 T_MSG("Command could not be resolved to OX_ID");
4063 spin_unlock_irqrestore(&fi->fc_lock, flags);
4064 return FAILED;
4067 switch(Cmnd->cmnd[0]) {
4068 case WRITE_6:
4069 case WRITE_10:
4070 case WRITE_12:
4071 break;
4072 default:
4073 ox_id |= SCSI_READ_BIT;
4075 abort_status = abort_exchange(fi, ox_id);
4077 if ((q->login == LOGIN_COMPLETED) && (abort_status == TRUE)) {
4078 /* Then, transmit an ABTS to the target. The rest
4079 * is done when the BA_ACC is received for the ABTS.
4081 tx_abts(fi, q->d_id, ox_id);
4083 else {
4084 u_int STE_bit;
4085 u_short x_id;
4086 /* Invalidate resources for that Exchange.
4088 x_id = ox_id & MAX_SCSI_XID;
4089 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4090 if (STE_bit & SEST_V) {
4091 *(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
4092 invalidate_SEST_entry(fi, ox_id);
4096 LEAVE("iph5526_abort");
4097 spin_unlock_irqrestore(&fi->fc_lock, flags);
4098 return SUCCESS;
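/* Invalidate the SEST entry for the given OX_ID. Inbound (Read)
 * exchanges are first flushed out of Tachyon's on-chip cache; outbound
 * (Write) exchanges have their EDB length zeroed. Returns TRUE if the
 * entry was valid, FALSE otherwise.
 */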
4101 static int abort_exchange(struct fc_info *fi, u_short ox_id)
4103 u_short x_id;
4104 volatile u_int flush_SEST, STE_bit;
4105 x_id = ox_id & MAX_SCSI_XID;
4106 DPRINTK1("Aborting Exchange %x", ox_id);
4108 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4109 /* Is the Exchange still active? */
4111 if (STE_bit & SEST_V) {
4112 if (ox_id & SCSI_READ_BIT) {
4113 /* If the Exchange to be aborted is Inbound,
4114 * Flush the SEST Entry from Tachyon's Cache.
4116 *(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
4117 flush_tachyon_cache(fi, ox_id);
4118 flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
4119 while ((flush_SEST & 0x80000000) != 0)
4120 flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
4121 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4122 while ((STE_bit & 0x80000000) != 0)
4123 STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
4124 flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
4125 invalidate_SEST_entry(fi, ox_id);
4127 else {
4128 int i;
4129 u_int *ptr_edb;
4130 /* For In-Order Reassembly, the following is done:
4131 * First, write zero as the buffer length in the EDB.
4133 ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
4134 for (i = 0; i < EDB_LEN; i++)
4135 if (fi->q.ptr_edb[i] == ptr_edb)
4136 break;
4137 if (i < EDB_LEN)
4138 *ptr_edb = *ptr_edb & 0x0000FFFF;
4139 else
4140 T_MSG("EDB not found while clearing in abort_exchange()");
4142 DPRINTK1("Exchange %x invalidated", ox_id);
4143 return TRUE;
4145 else {
4146 DPRINTK1("SEST Entry for exchange %x not valid", ox_id);
4147 return FALSE;
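/* Force Tachyon to flush a SEST entry from its on-chip cache. In Loop
 * topology we enter host control, drain the inbound FIFO and write the
 * flush register; in point-to-point we take the chip offline, flush
 * and re-initialize it as an N_Port.
 */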
4151 static void flush_tachyon_cache(struct fc_info *fi, u_short ox_id)
4153 volatile u_int tachyon_status;
4154 if (fi->g.loop_up == TRUE) {
4155 writel(HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
4156 /* Make sure that the Inbound FIFO is empty.
4158 do {
4159 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
4160 udelay(200);
4161 }while ((tachyon_status & RECEIVE_FIFO_EMPTY) == 0);
4162 /* Ok. Go ahead and flush! */
4164 writel(0x80000000 | ox_id, fi->t_r.ptr_tach_flush_oxid_reg);
4165 writel(EXIT_HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
4166 return;
4168 if (fi->g.ptp_up == TRUE) {
4169 take_tachyon_offline(fi);
4170 /* Make sure that the Inbound FIFO is empty.
4172 do {
4173 tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
4174 udelay(200);
4175 }while ((tachyon_status & RECEIVE_FIFO_EMPTY) == 0);
4176 writel(0x80000000 | ox_id, fi->t_r.ptr_tach_flush_oxid_reg);
4177 /* Write the Initialize command to the FM Control reg.
4179 fi->g.n_port_try = TRUE;
4180 DPRINTK1("In abort_exchange, TACHYON initializing as N_Port...\n");
4181 writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
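/* Map a SCSI target number to its fc_node_info entry. A logged-out
 * node triggers a fresh PLOGI, and a node without a completed PRLI
 * gets the PRLI retried; NULL is returned when no cached node carries
 * that target_id.
 */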
4185 static struct fc_node_info *resolve_target(struct fc_info *fi, u_char target)
4187 struct fc_node_info *temp = fi->node_info_list;
4188 while(temp != NULL)
4189 if (temp->target_id == target) {
4190 if ((temp->scsi == TARGET) && (temp->login == LOGIN_COMPLETED))
4191 return temp;
4192 else {
4193 if (temp->login != LOGIN_COMPLETED) {
4194 /* The Target is not currently logged in.
4195 * It could be a Target on the Local Loop or
4196 * on a Remote Loop connected through a switch.
4197 * In either case, we will know whenever the Target
4198 * comes On-Line again. We let the command
4199 * time out so that it gets retried. */
4201 T_MSG("Target %d not logged in.", temp->target_id);
4202 tx_logi(fi, ELS_PLOGI, temp->d_id);
4203 return temp;
4205 else {
4206 if (temp->scsi != TARGET) {
4207 /* For some reason, we did not get a response to
4208 * PRLI. Let's try it again... */
4210 DPRINTK1("Node not PRLIied. Txing PRLI...");
4211 tx_prli(fi, ELS_PRLI, temp->d_id, OX_ID_FIRST_SEQUENCE);
4214 return temp;
4217 else
4218 temp = temp->next;
4219 return NULL;
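/* Prepare the SEST entry for a SCSI command. For Reads an SDB is
 * allocated and filled with the (possibly scatter-gather) buffer
 * addresses; for Writes a Tachyon header and a contiguous run of EDBs
 * describing the outbound data are built. Returns 1 if buffers run out.
 */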
4222 static int add_to_sest(struct fc_info *fi, Scsi_Cmnd *Cmnd, struct fc_node_info *ni)
4224 /* we have at least 1 buffer, the terminator */
4225 int no_of_sdb_buffers = 1, i;
4226 int no_of_edb_buffers = 0;
4227 u_int *req_buffer = (u_int *)Cmnd->request_buffer;
4228 u_int *ptr_sdb = NULL;
4229 struct scatterlist *sl1, *sl2 = NULL;
4230 int no_of_sg = 0;
4232 switch(fi->g.type_of_frame) {
4233 case FC_SCSI_READ:
4234 fi->g.inb_sest_entry.flags_and_byte_offset = htonl(INB_SEST_VED);
4235 fi->g.inb_sest_entry.byte_count = 0;
4236 fi->g.inb_sest_entry.no_of_recvd_frames = 0;
4237 fi->g.inb_sest_entry.no_of_expected_frames = 0;
4238 fi->g.inb_sest_entry.last_fctl = 0;
4240 if (Cmnd->use_sg) {
4241 no_of_sg = Cmnd->use_sg;
4242 sl1 = sl2 = (struct scatterlist *)Cmnd->request_buffer;
4243 for (i = 0; i < no_of_sg; i++) {
4244 no_of_sdb_buffers += sl1->length / SEST_BUFFER_SIZE;
4245 if (sl1->length % SEST_BUFFER_SIZE)
4246 no_of_sdb_buffers++;
4247 sl1++;
4250 else {
4251 no_of_sdb_buffers += Cmnd->request_bufflen / SEST_BUFFER_SIZE;
4252 if (Cmnd->request_bufflen % SEST_BUFFER_SIZE)
4253 no_of_sdb_buffers++;
4254 } /* if !use_sg */
4256 /* We are working with the premise that, at most, we would
4257 * get a scatter-gather buffer containing 63 buffers
4258 * of size 1024 bytes each. Is that a _bad_ assumption? */
4260 if (no_of_sdb_buffers > 512) {
4261 T_MSG("Number of SDB buffers needed = %d", no_of_sdb_buffers);
4262 T_MSG("Disable Scatter-Gather!!!");
4263 return 1;
4267 /* Store it in the sdb_table so that we can retrieve it and
4268 * free up the memory when the Read Command completes. */
4270 if (get_free_SDB(fi))
4271 return 1;
4272 ptr_sdb = fi->q.ptr_sdb_slot[fi->q.sdb_indx];
4273 fi->q.sdb_slot_status[fi->q.sdb_indx] = SDB_BUSY;
4274 fi->g.inb_sest_entry.sdb_address = htonl(virt_to_bus(ptr_sdb));
4276 if (Cmnd->use_sg) {
4277 int count = 0, j;
4278 for(i = 0; i < no_of_sg; i++) {
4279 char *addr_ptr = sl2->address;
4280 count = sl2->length / SEST_BUFFER_SIZE;
4281 if (sl2->length % SEST_BUFFER_SIZE)
4282 count++;
4283 for (j = 0; j < count; j++) {
4284 *(ptr_sdb) = htonl(virt_to_bus(addr_ptr));
4285 addr_ptr += SEST_BUFFER_SIZE;
4286 ptr_sdb++;
4288 count = 0;
4289 sl2++;
4292 else {
4293 for (i = 0; i < no_of_sdb_buffers - 1; i++) {
4294 *(ptr_sdb) = htonl(virt_to_bus(req_buffer));
4295 req_buffer += SEST_BUFFER_SIZE/4;
4296 ptr_sdb++;
4299 *(ptr_sdb) = htonl(0x1); /* Terminator */
4301 /* The scratch pad is used to hold the index into the SDB.
4303 fi->g.inb_sest_entry.scratch_pad = fi->q.sdb_indx;
4304 fi->g.inb_sest_entry.expected_ro = 0;
4305 fi->g.inb_sest_entry.buffer_index = 0;
4306 fi->g.inb_sest_entry.buffer_offset = 0;
4307 memcpy(fi->q.ptr_sest[fi->g.scsi_oxid], &fi->g.inb_sest_entry, sizeof(INB_SEST_ENTRY));
4308 break;
4309 case FC_SCSI_WRITE:
4310 fi->g.outb_sest_entry.flags_and_did = htonl(OUTB_SEST_VED | ni->d_id);
4311 fi->g.outb_sest_entry.max_frame_len = htons(ni->mtu << 4);
4312 fi->g.outb_sest_entry.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | ODB_NO_INT | ODB_NO_COMP);
4313 fi->g.outb_sest_entry.total_seq_length = INV_SEQ_LEN;
4314 fi->g.outb_sest_entry.link = htons(OUTB_SEST_LINK);
4315 fi->g.outb_sest_entry.transaction_id = htonl(fi->g.scsi_oxid);
4316 fi->g.outb_sest_entry.seq_id = fi->g.seq_id;
4317 fi->g.outb_sest_entry.reserved = 0x0;
4318 fi->g.outb_sest_entry.header_length = htons(TACHYON_HEADER_LEN);
4321 u_char df_ctl = 0;
4322 u_short rx_id = RX_ID_FIRST_SEQUENCE;
4323 u_int r_ctl = FC4_DEVICE_DATA | SOLICITED_DATA;
4324 u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
4325 /* Multi Frame Sequence ? If yes, set RO bit.
4327 if (Cmnd->request_bufflen > ni->mtu)
4328 type |= RELATIVE_OFF_PRESENT;
4329 build_tachyon_header(fi, fi->g.my_id, r_ctl, ni->d_id, type, fi->g.seq_id, df_ctl, fi->g.scsi_oxid, rx_id, NULL);
4330 if (get_free_header(fi) || get_free_EDB(fi))
4331 return 1;
4332 memcpy(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx], &(fi->g.tach_header), TACHYON_HEADER_LEN);
4333 fi->g.outb_sest_entry.header_address = htonl(virt_to_bus(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx]));
4334 update_tachyon_header_indx(fi);
4337 if (Cmnd->use_sg) {
4338 no_of_sg = Cmnd->use_sg;
4339 sl1 = sl2 = (struct scatterlist *)Cmnd->request_buffer;
4340 for (i = 0; i < no_of_sg; i++) {
4341 no_of_edb_buffers += sl1->length / SEST_BUFFER_SIZE;
4342 if (sl1->length % SEST_BUFFER_SIZE)
4343 no_of_edb_buffers++;
4344 sl1++;
4347 else {
4348 no_of_edb_buffers += Cmnd->request_bufflen / SEST_BUFFER_SIZE;
4349 if (Cmnd->request_bufflen % SEST_BUFFER_SIZE)
4350 no_of_edb_buffers++;
4351 } /* if !use_sg */
4354 /* We need "no_of_edb_buffers" _contiguous_ EDBs
4355 * that are FREE. Check for that first.
4357 for (i = 0; i < no_of_edb_buffers; i++) {
4358 int j;
4359 if ((fi->q.edb_buffer_indx + no_of_edb_buffers) >= EDB_LEN)
4360 fi->q.edb_buffer_indx = 0;
4361 if (fi->q.free_edb_list[fi->q.edb_buffer_indx + i] != EDB_FREE) {
4362 for (j = 0; j < i; j++)
4363 update_EDB_indx(fi);
4364 if (get_free_EDB(fi))
4365 return 1;
4366 i = 0;
4370 /* We have enough free EDBs. */
4372 if (Cmnd->use_sg) {
4373 fi->g.outb_sest_entry.edb_address = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
4374 sl1 = (struct scatterlist *)Cmnd->request_buffer;
4375 for(i = 0; i < no_of_sg; i++) {
4376 int count = 0, j;
4377 count = sl1->length / SEST_BUFFER_SIZE;
4378 for (j = 0; j < count; j++) {
4379 build_EDB(fi, (char *)sl1->address, 0, SEST_BUFFER_SIZE);
4380 memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4381 /* Mark this EDB as being in use */
4382 fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4383 /* We have already made sure that we have enough
4384 * free EDBs that are contiguous. So this is
4385 * safe. */
4387 update_EDB_indx(fi);
4388 sl1->address += SEST_BUFFER_SIZE;
4390 /* Just in case it's not a multiple of
4391 * SEST_BUFFER_SIZE bytes. */
4393 if (sl1->length % SEST_BUFFER_SIZE) {
4394 build_EDB(fi, (char *)sl1->address, 0, sl1->length % SEST_BUFFER_SIZE);
4395 memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4396 fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4397 update_EDB_indx(fi);
4399 sl1++;
4401 /* The last EDB is special. It needs the "end bit" to
4402 * be set. */
4404 *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) = *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) | htons(EDB_END);
4406 else {
4407 int count = 0, j;
4408 fi->g.outb_sest_entry.edb_address = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
4409 count = Cmnd->request_bufflen / SEST_BUFFER_SIZE;
4410 for (j = 0; j < count; j++) {
4411 build_EDB(fi, (char *)req_buffer, 0, SEST_BUFFER_SIZE);
4412 memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4413 /* Mark this EDB as being in use */
4414 fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4415 /* We have already made sure that we have enough
4416 * free EDBs that are contiguous. So this is
4417 * safe. */
4419 update_EDB_indx(fi);
4420 req_buffer += SEST_BUFFER_SIZE;
4422 /* Just in case it's not a multiple of
4423 * SEST_BUFFER_SIZE bytes. */
4425 if (Cmnd->request_bufflen % SEST_BUFFER_SIZE) {
4426 build_EDB(fi, (char *)req_buffer, EDB_END, Cmnd->request_bufflen % SEST_BUFFER_SIZE);
4427 memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
4428 fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
4429 update_EDB_indx(fi);
4431 else {
4432 /* Mark the last EDB as the "end" EDB. */
4434 *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) = *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) | htons(EDB_END);
4438 /* Finally, we have something to send! */
4440 memcpy(fi->q.ptr_sest[fi->g.scsi_oxid], &fi->g.outb_sest_entry, sizeof(OUTB_SEST_ENTRY));
4441 break;
4443 return 0;
4446 static void update_FCP_CMND_indx(struct fc_info *fi)
4448 fi->q.fcp_cmnd_indx++;
4449 if (fi->q.fcp_cmnd_indx == NO_OF_FCP_CMNDS)
4450 fi->q.fcp_cmnd_indx = 0;
4453 static int get_scsi_oxid(struct fc_info *fi)
4455 u_short initial_oxid = fi->g.scsi_oxid;
4456 /* Check if the OX_ID is in use.
4457 * We could have an outstanding SCSI command.
4459 while (fi->q.free_scsi_oxid[fi->g.scsi_oxid] != OXID_AVAILABLE) {
4460 update_scsi_oxid(fi);
4461 if (fi->g.scsi_oxid == initial_oxid) {
4462 T_MSG("No free OX_IDs available")
4463 reset_tachyon(fi, SOFTWARE_RESET);
4464 return 1;
4467 return 0;
4470 static void update_scsi_oxid(struct fc_info *fi)
4472 fi->g.scsi_oxid++;
4473 if (fi->g.scsi_oxid == (MAX_SCSI_XID + 1))
4474 fi->g.scsi_oxid = 0;
4477 static int get_free_SDB(struct fc_info *fi)
4479 unsigned int initial_indx = fi->q.sdb_indx;
4480 /* Check if the SDB is in use.
4481 * We could have an outstanding SCSI Read command.
4482 * We should find a free slot as we can queue a
4483 * maximum of 32 SCSI commands only.
4485 while (fi->q.sdb_slot_status[fi->q.sdb_indx] != SDB_FREE) {
4486 update_SDB_indx(fi);
4487 if (fi->q.sdb_indx == initial_indx) {
4488 T_MSG("No free SDB buffers available")
4489 reset_tachyon(fi, SOFTWARE_RESET);
4490 return 1;
4493 return 0;
4496 static void update_SDB_indx(struct fc_info *fi)
4498 fi->q.sdb_indx++;
4499 if (fi->q.sdb_indx == NO_OF_SDB_ENTRIES)
4500 fi->q.sdb_indx = 0;
4503 int iph5526_release(struct Scsi_Host *host)
4505 struct iph5526_hostdata *hostdata = (struct iph5526_hostdata*)host->hostdata;
4506 struct fc_info *fi = hostdata->fi;
4507 free_irq(host->irq, host);
4508 iounmap(fi->g.mem_base);
4509 return 0;
4512 const char *iph5526_info(struct Scsi_Host *host)
4514 static char buf[80];
4515 sprintf(buf, "Interphase 5526 Fibre Channel PCI SCSI Adapter using IRQ %d\n", host->irq);
4516 return buf;
4519 #ifdef MODULE
4521 #define NAMELEN 8 /* # of chars for storing dev->name */
4523 static struct net_device *dev_fc[MAX_FC_CARDS];
4525 static int io = 0;
4526 static int irq = 0;
4527 static int bad = 0; /* 0xbad = bad sig or no reset ack */
4528 static int scsi_registered;
4531 int init_module(void)
4533 int i = 0;
4535 driver_template.module = &__this_module;
4536 scsi_register_module(MODULE_SCSI_HA, &driver_template);
4537 if (driver_template.present)
4538 scsi_registered = TRUE;
4539 else {
4540 printk("iph5526: SCSI registration failed!!!\n");
4541 scsi_registered = FALSE;
4542 scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
4545 while(fc[i] != NULL) {
4546 dev_fc[i] = NULL;
4547 dev_fc[i] = init_fcdev(dev_fc[i], 0);
4548 if (dev_fc[i] == NULL) {
4549 printk("iph5526.c: init_fcdev failed for card #%d\n", i+1);
4550 break;
4552 dev_fc[i]->irq = irq;
4553 dev_fc[i]->mem_end = bad;
4554 dev_fc[i]->base_addr = io;
4555 dev_fc[i]->init = iph5526_probe;
4556 dev_fc[i]->priv = fc[i];
4557 fc[i]->dev = dev_fc[i];
4558 if (register_fcdev(dev_fc[i]) != 0) {
4559 kfree(dev_fc[i]);
4560 dev_fc[i] = NULL;
4561 if (i == 0) {
4562 printk("iph5526.c: IP registration failed!!!\n");
4563 return -ENODEV;
4566 i++;
4568 if (i == 0)
4569 return -ENODEV;
4571 return 0;
4574 void cleanup_module(void)
4576 int i = 0;
4577 while(fc[i] != NULL) {
4578 struct net_device *dev = fc[i]->dev;
4579 void *priv = dev->priv;
4580 fc[i]->g.dont_init = TRUE;
4581 take_tachyon_offline(fc[i]);
4582 unregister_fcdev(dev);
4583 clean_up_memory(fc[i]);
4584 if (dev->priv)
4585 kfree(priv);
4586 kfree(dev);
4587 dev = NULL;
4588 i++;
4590 if (scsi_registered == TRUE)
4591 scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
4593 #endif /* MODULE */
4595 void clean_up_memory(struct fc_info *fi)
4597 int i,j;
4598 ENTER("clean_up_memory");
4599 if (fi->q.ptr_mfsbq_base)
4600 free_pages((u_long)bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base))), 5);
4601 DPRINTK("after kfree2");
4602 for (i = 0; i < SFSBQ_LENGTH; i++)
4603 for (j = 0; j < NO_OF_ENTRIES; j++)
4604 if (fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES + j])
4605 kfree(fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES + j]);
4606 DPRINTK("after kfree1");
4607 if (fi->q.ptr_ocq_base)
4608 free_page((u_long)fi->q.ptr_ocq_base);
4609 if (fi->q.ptr_imq_base)
4610 free_page((u_long)fi->q.ptr_imq_base);
4611 if (fi->q.ptr_mfsbq_base)
4612 free_page((u_long)fi->q.ptr_mfsbq_base);
4613 if (fi->q.ptr_sfsbq_base)
4614 free_page((u_long)fi->q.ptr_sfsbq_base);
4615 if (fi->q.ptr_edb_base)
4616 free_pages((u_long)fi->q.ptr_edb_base, 5);
4617 if (fi->q.ptr_sest_base)
4618 free_pages((u_long)fi->q.ptr_sest_base, 5);
4619 if (fi->q.ptr_tachyon_header_base)
4620 free_page((u_long)fi->q.ptr_tachyon_header_base);
4621 if (fi->q.ptr_sdb_base)
4622 free_pages((u_long)fi->q.ptr_sdb_base, 5);
4623 if (fi->q.ptr_fcp_cmnd_base)
4624 free_page((u_long)fi->q.ptr_fcp_cmnd_base);
4625 DPRINTK("after free_pages");
4626 if (fi->q.ptr_host_ocq_cons_indx)
4627 kfree(fi->q.ptr_host_ocq_cons_indx);
4628 if (fi->q.ptr_host_hpcq_cons_indx)
4629 kfree(fi->q.ptr_host_hpcq_cons_indx);
4630 if (fi->q.ptr_host_imq_prod_indx)
4631 kfree(fi->q.ptr_host_imq_prod_indx);
4632 DPRINTK("after kfree3");
4633 while (fi->node_info_list) {
4634 struct fc_node_info *temp_list = fi->node_info_list;
4635 fi->node_info_list = fi->node_info_list->next;
4636 kfree(temp_list);
4638 while (fi->ox_id_list) {
4639 struct ox_id_els_map *temp = fi->ox_id_list;
4640 fi->ox_id_list = fi->ox_id_list->next;
4641 kfree(temp);
4643 LEAVE("clean_up_memory");
4646 static int initialize_register_pointers(struct fc_info *fi)
4648 ENTER("initialize_register_pointers");
4649 if(fi->g.tachyon_base == 0)
4650 return -ENOMEM;
4652 fi->i_r.ptr_ichip_hw_control_reg = ICHIP_HW_CONTROL_REG_OFF + fi->g.tachyon_base;
4653 fi->i_r.ptr_ichip_hw_status_reg = ICHIP_HW_STATUS_REG_OFF + fi->g.tachyon_base;
4654 fi->i_r.ptr_ichip_hw_addr_mask_reg = ICHIP_HW_ADDR_MASK_REG_OFF + fi->g.tachyon_base;
4655 fi->t_r.ptr_ocq_base_reg = OCQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4656 fi->t_r.ptr_ocq_len_reg = OCQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4657 fi->t_r.ptr_ocq_prod_indx_reg = OCQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4658 fi->t_r.ptr_ocq_cons_indx_reg = OCQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4659 fi->t_r.ptr_imq_base_reg = IMQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4660 fi->t_r.ptr_imq_len_reg = IMQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4661 fi->t_r.ptr_imq_cons_indx_reg = IMQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4662 fi->t_r.ptr_imq_prod_indx_reg = IMQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4663 fi->t_r.ptr_mfsbq_base_reg = MFSBQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4664 fi->t_r.ptr_mfsbq_len_reg = MFSBQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4665 fi->t_r.ptr_mfsbq_prod_reg = MFSBQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4666 fi->t_r.ptr_mfsbq_cons_reg = MFSBQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4667 fi->t_r.ptr_mfsbuff_len_reg = MFS_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4668 fi->t_r.ptr_sfsbq_base_reg = SFSBQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4669 fi->t_r.ptr_sfsbq_len_reg = SFSBQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4670 fi->t_r.ptr_sfsbq_prod_reg = SFSBQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
4671 fi->t_r.ptr_sfsbq_cons_reg = SFSBQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
4672 fi->t_r.ptr_sfsbuff_len_reg = SFS_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4673 fi->t_r.ptr_sest_base_reg = SEST_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
4674 fi->t_r.ptr_sest_len_reg = SEST_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4675 fi->t_r.ptr_scsibuff_len_reg = SCSI_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
4676 fi->t_r.ptr_tach_config_reg = TACHYON_CONFIG_REGISTER_OFFSET + fi->g.tachyon_base;
4677 fi->t_r.ptr_tach_control_reg = TACHYON_CONTROL_REGISTER_OFFSET + fi->g.tachyon_base;
4678 fi->t_r.ptr_tach_status_reg = TACHYON_STATUS_REGISTER_OFFSET + fi->g.tachyon_base;
4679 fi->t_r.ptr_tach_flush_oxid_reg = TACHYON_FLUSH_SEST_REGISTER_OFFSET + fi->g.tachyon_base;
4680 fi->t_r.ptr_fm_config_reg = FMGR_CONFIG_REGISTER_OFFSET + fi->g.tachyon_base;
4681 fi->t_r.ptr_fm_control_reg = FMGR_CONTROL_REGISTER_OFFSET + fi->g.tachyon_base;
4682 fi->t_r.ptr_fm_status_reg = FMGR_STATUS_REGISTER_OFFSET + fi->g.tachyon_base;
4683 fi->t_r.ptr_fm_tov_reg = FMGR_TIMER_REGISTER_OFFSET + fi->g.tachyon_base;
4684 fi->t_r.ptr_fm_wwn_hi_reg = FMGR_WWN_HI_REGISTER_OFFSET + fi->g.tachyon_base;
4685 fi->t_r.ptr_fm_wwn_low_reg = FMGR_WWN_LO_REGISTER_OFFSET + fi->g.tachyon_base;
4686 fi->t_r.ptr_fm_rx_al_pa_reg = FMGR_RCVD_ALPA_REGISTER_OFFSET + fi->g.tachyon_base;
4688 LEAVE("initialize_register_pointers");
4689 return 1;
4695 * Local variables:
4696 * compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I/usr/src/linux/net/tcp -c iph5526.c"
4697 * version-control: t
4698 * kept-new-versions: 5
4699 * End: