allow coexistence of N build and AC build.
[tomato.git] / release / src-rt-6.x / linux / linux-2.6 / drivers / scsi / gdth.c
bloba6bf96b8a2f0a8f2d03b61c866909d421d13fd35
1 /************************************************************************
2 * Linux driver for *
3 * ICP vortex GmbH: GDT ISA/EISA/PCI Disk Array Controllers *
4 * Intel Corporation: Storage RAID Controllers *
5 * *
6 * gdth.c *
7 * Copyright (C) 1995-06 ICP vortex GmbH, Achim Leubner *
8 * Copyright (C) 2002-04 Intel Corporation *
9 * Copyright (C) 2003-06 Adaptec Inc. *
10 * <achim_leubner@adaptec.com> *
11 * *
12 * Additions/Fixes: *
13 * Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com> *
14 * Johannes Dinner <johannes_dinner@adaptec.com> *
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published *
18 * by the Free Software Foundation; either version 2 of the License, *
19 * or (at your option) any later version. *
20 * *
21 * This program is distributed in the hope that it will be useful, *
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
24 * GNU General Public License for more details. *
25 * *
26 * You should have received a copy of the GNU General Public License *
27 * along with this kernel; if not, write to the Free Software *
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
29 * *
30 * Linux kernel 2.4.x, 2.6.x supported *
31 * *
32 * $Log: gdth.c,v $
33 * Revision 1.74 2006/04/10 13:44:47 achim
34 * Community changes for 2.6.x
35 * Kernel 2.2.x no longer supported
36 * scsi_request interface removed, thanks to Christoph Hellwig
38 * Revision 1.73 2004/03/31 13:33:03 achim
39 * Special command 0xfd implemented to detect 64-bit DMA support
41 * Revision 1.72 2004/03/17 08:56:04 achim
42 * 64-bit DMA only enabled if FW >= x.43
44 * Revision 1.71 2004/03/05 15:51:29 achim
45 * Screen service: separate message buffer, bugfixes
47 * Revision 1.70 2004/02/27 12:19:07 achim
48 * Bugfix: Reset bit in config (0xfe) call removed
50 * Revision 1.69 2004/02/20 09:50:24 achim
51 * Compatibility changes for kernels < 2.4.20
52 * Bugfix screen service command size
53 * pci_set_dma_mask() error handling added
55 * Revision 1.68 2004/02/19 15:46:54 achim
56 * 64-bit DMA bugfixes
57 * Drive size bugfix for drives > 1TB
59 * Revision 1.67 2004/01/14 13:11:57 achim
60 * Tool access over /proc no longer supported
61 * Bugfixes IOCTLs
63 * Revision 1.66 2003/12/19 15:04:06 achim
64 * Bugfixes support for drives > 2TB
66 * Revision 1.65 2003/12/15 11:21:56 achim
67 * 64-bit DMA support added
68 * Support for drives > 2 TB implemented
69 * Kernels 2.2.x, 2.4.x, 2.6.x supported
71 * Revision 1.64 2003/09/17 08:30:26 achim
72 * EISA/ISA controller scan disabled
73 * Command line switch probe_eisa_isa added
75 * Revision 1.63 2003/07/12 14:01:00 Daniele Bellucci <bellucda@tiscali.it>
76 * Minor cleanups in gdth_ioctl.
78 * Revision 1.62 2003/02/27 15:01:59 achim
79 * Dynamic DMA mapping implemented
80 * New (character device) IOCTL interface added
81 * Other controller related changes made
83 * Revision 1.61 2002/11/08 13:09:52 boji
84 * Added support for XSCALE based RAID Controllers
85 * Fixed SCREENSERVICE initialization in SMP cases
86 * Added checks for gdth_polling before GDTH_HA_LOCK
88 * Revision 1.60 2002/02/05 09:35:22 achim
89 * MODULE_LICENSE only if kernel >= 2.4.11
91 * Revision 1.59 2002/01/30 09:46:33 achim
92 * Small changes
94 * Revision 1.58 2002/01/29 15:30:02 achim
95 * Set default value of shared_access to Y
96 * New status S_CACHE_RESERV for clustering added
98 * Revision 1.57 2001/08/21 11:16:35 achim
99 * Bugfix free_irq()
101 * Revision 1.56 2001/08/09 11:19:39 achim
102 * Scsi_Host_Template changes
104 * Revision 1.55 2001/08/09 10:11:28 achim
105 * Command HOST_UNFREEZE_IO before cache service init.
107 * Revision 1.54 2001/07/20 13:48:12 achim
108 * Expand: gdth_analyse_hdrive() removed
110 * Revision 1.53 2001/07/17 09:52:49 achim
111 * Small OEM related change
113 * Revision 1.52 2001/06/19 15:06:20 achim
114 * New host command GDT_UNFREEZE_IO added
116 * Revision 1.51 2001/05/22 06:42:37 achim
117 * PCI: Subdevice ID added
119 * Revision 1.50 2001/05/17 13:42:16 achim
120 * Support for Intel Storage RAID Controllers added
122 * Revision 1.50 2001/05/17 12:12:34 achim
123 * Support for Intel Storage RAID Controllers added
125 * Revision 1.49 2001/03/15 15:07:17 achim
126 * New __setup interface for boot command line options added
128 * Revision 1.48 2001/02/06 12:36:28 achim
129 * Bugfix Cluster protocol
131 * Revision 1.47 2001/01/10 14:42:06 achim
132 * New switch shared_access added
134 * Revision 1.46 2001/01/09 08:11:35 achim
135 * gdth_command() removed
136 * meaning of Scsi_Pointer members changed
138 * Revision 1.45 2000/11/16 12:02:24 achim
139 * Changes for kernel 2.4
141 * Revision 1.44 2000/10/11 08:44:10 achim
142 * Clustering changes: New flag media_changed added
144 * Revision 1.43 2000/09/20 12:59:01 achim
145 * DPMEM remap functions for all PCI controller types implemented
146 * Small changes for ia64 platform
148 * Revision 1.42 2000/07/20 09:04:50 achim
149 * Small changes for kernel 2.4
151 * Revision 1.41 2000/07/04 14:11:11 achim
152 * gdth_analyse_hdrive() added to rescan drives after online expansion
154 * Revision 1.40 2000/06/27 11:24:16 achim
155 * Changes Clustering, Screenservice
157 * Revision 1.39 2000/06/15 13:09:04 achim
158 * Changes for gdth_do_cmd()
160 * Revision 1.38 2000/06/15 12:08:43 achim
161 * Bugfix gdth_sync_event(), service SCREENSERVICE
162 * Data direction for command 0xc2 changed to DOU
164 * Revision 1.37 2000/05/25 13:50:10 achim
165 * New driver parameter virt_ctr added
167 * Revision 1.36 2000/05/04 08:50:46 achim
168 * Event buffer now in gdth_ha_str
170 * Revision 1.35 2000/03/03 10:44:08 achim
171 * New event_string only valid for the RP controller family
173 * Revision 1.34 2000/03/02 14:55:29 achim
174 * New mechanism for async. event handling implemented
176 * Revision 1.33 2000/02/21 15:37:37 achim
177 * Bugfix Alpha platform + DPMEM above 4GB
179 * Revision 1.32 2000/02/14 16:17:37 achim
180 * Bugfix sense_buffer[] + raw devices
182 * Revision 1.31 2000/02/10 10:29:00 achim
183 * Delete sense_buffer[0], if command OK
185 * Revision 1.30 1999/11/02 13:42:39 achim
186 * ARRAY_DRV_LIST2 implemented
187 * Now 255 log. and 100 host drives supported
189 * Revision 1.29 1999/10/05 13:28:47 achim
190 * GDT_CLUST_RESET added
192 * Revision 1.28 1999/08/12 13:44:54 achim
193 * MOUNTALL removed
194  * Cluster drives -> removable drives
196 * Revision 1.27 1999/06/22 07:22:38 achim
197 * Small changes
199 * Revision 1.26 1999/06/10 16:09:12 achim
200 * Cluster Host Drive support: Bugfixes
202 * Revision 1.25 1999/06/01 16:03:56 achim
203 * gdth_init_pci(): Manipulate config. space to start RP controller
205 * Revision 1.24 1999/05/26 11:53:06 achim
206 * Cluster Host Drive support added
208 * Revision 1.23 1999/03/26 09:12:31 achim
209 * Default value for hdr_channel set to 0
211 * Revision 1.22 1999/03/22 16:27:16 achim
212 * Bugfix: gdth_store_event() must not be locked with GDTH_LOCK_HA()
214 * Revision 1.21 1999/03/16 13:40:34 achim
215 * Problems with reserved drives solved
216 * gdth_eh_bus_reset() implemented
218 * Revision 1.20 1999/03/10 09:08:13 achim
219 * Bugfix: Corrections in gdth_direction_tab[] made
220 * Bugfix: Increase command timeout (gdth_update_timeout()) NOT in gdth_putq()
222 * Revision 1.19 1999/03/05 14:38:16 achim
223 * Bugfix: Heads/Sectors mapping for reserved devices possibly wrong
224 * -> gdth_eval_mapping() implemented, changes in gdth_bios_param()
225 * INIT_RETRIES set to 100s to avoid DEINIT-Timeout for controllers
226 * with BIOS disabled and memory test set to Intensive
227 * Enhanced /proc support
229 * Revision 1.18 1999/02/24 09:54:33 achim
230 * Command line parameter hdr_channel implemented
231 * Bugfix for EISA controllers + Linux 2.2.x
233 * Revision 1.17 1998/12/17 15:58:11 achim
234 * Command line parameters implemented
235 * Changes for Alpha platforms
236 * PCI controller scan changed
237 * SMP support improved (spin_lock_irqsave(),...)
238 * New async. events, new scan/reserve commands included
240 * Revision 1.16 1998/09/28 16:08:46 achim
241 * GDT_PCIMPR: DPMEM remapping, if required
242 * mdelay() added
244 * Revision 1.15 1998/06/03 14:54:06 achim
245 * gdth_delay(), gdth_flush() implemented
246 * Bugfix: gdth_release() changed
248 * Revision 1.14 1998/05/22 10:01:17 achim
249 * mj: pcibios_strerror() removed
250 * Improved SMP support (if version >= 2.1.95)
251 * gdth_halt(): halt_called flag added (if version < 2.1)
253 * Revision 1.13 1998/04/16 09:14:57 achim
254 * Reserve drives (for raw service) implemented
255 * New error handling code enabled
256 * Get controller name from board_info() IOCTL
257 * Final round of PCI device driver patches by Martin Mares
259 * Revision 1.12 1998/03/03 09:32:37 achim
260 * Fibre channel controller support added
262 * Revision 1.11 1998/01/27 16:19:14 achim
263 * SA_SHIRQ added
264 * add_timer()/del_timer() instead of GDTH_TIMER
265 * scsi_add_timer()/scsi_del_timer() instead of SCSI_TIMER
266 * New error handling included
268 * Revision 1.10 1997/10/31 12:29:57 achim
269 * Read heads/sectors from host drive
271 * Revision 1.9 1997/09/04 10:07:25 achim
272 * IO-mapping with virt_to_bus(), gdth_readb(), gdth_writeb(), ...
273  * register_reboot_notifier() to get a notify on shutdown used
275 * Revision 1.8 1997/04/02 12:14:30 achim
276 * Version 1.00 (see gdth.h), tested with kernel 2.0.29
278 * Revision 1.7 1997/03/12 13:33:37 achim
279 * gdth_reset() changed, new async. events
281 * Revision 1.6 1997/03/04 14:01:11 achim
282 * Shutdown routine gdth_halt() implemented
284 * Revision 1.5 1997/02/21 09:08:36 achim
285 * New controller included (RP, RP1, RP2 series)
286 * IOCTL interface implemented
288 * Revision 1.4 1996/07/05 12:48:55 achim
289 * Function gdth_bios_param() implemented
290 * New constant GDTH_MAXC_P_L inserted
291 * GDT_WRITE_THR, GDT_EXT_INFO implemented
292 * Function gdth_reset() changed
294 * Revision 1.3 1996/05/10 09:04:41 achim
295 * Small changes for Linux 1.2.13
297 * Revision 1.2 1996/05/09 12:45:27 achim
298 * Loadable module support implemented
299 * /proc support corrections made
301 * Revision 1.1 1996/04/11 07:35:57 achim
302 * Initial revision
304 ************************************************************************/
306 /* All GDT Disk Array Controllers are fully supported by this driver.
307 * This includes the PCI/EISA/ISA SCSI Disk Array Controllers and the
308 * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete
309 * list of all controller types.
311 * If you have one or more GDT3000/3020 EISA controllers with
312 * controller BIOS disabled, you have to set the IRQ values with the
313 * command line option "gdth=irq1,irq2,...", where the irq1,irq2,... are
314 * the IRQ values for the EISA controllers.
316 * After the optional list of IRQ values, other possible
317 * command line options are:
318 * disable:Y disable driver
319 * disable:N enable driver
320 * reserve_mode:0 reserve no drives for the raw service
321 * reserve_mode:1 reserve all not init., removable drives
322 * reserve_mode:2 reserve all not init. drives
323 * reserve_list:h,b,t,l,h,b,t,l,... reserve particular drive(s) with
324 * h- controller no., b- channel no.,
325 * t- target ID, l- LUN
326 * reverse_scan:Y reverse scan order for PCI controllers
327 * reverse_scan:N scan PCI controllers like BIOS
328 * max_ids:x x - target ID count per channel (1..MAXID)
329 * rescan:Y rescan all channels/IDs
330 * rescan:N use all devices found until now
331 * virt_ctr:Y map every channel to a virtual controller
332 * virt_ctr:N use multi channel support
333 * hdr_channel:x x - number of virtual bus for host drives
334 * shared_access:Y disable driver reserve/release protocol to
335 * access a shared resource from several nodes,
336 * appropriate controller firmware required
337 * shared_access:N enable driver reserve/release protocol
338 * probe_eisa_isa:Y scan for EISA/ISA controllers
339 * probe_eisa_isa:N do not scan for EISA/ISA controllers
340 * force_dma32:Y use only 32 bit DMA mode
341 * force_dma32:N use 64 bit DMA mode, if supported
343 * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
344 * max_ids:127,rescan:N,virt_ctr:N,hdr_channel:0,
345 * shared_access:Y,probe_eisa_isa:N,force_dma32:N".
346 * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
348 * When loading the gdth driver as a module, the same options are available.
349 * You can set the IRQs with "IRQ=...". However, the syntax to specify the
350 * options changes slightly. You must replace all ',' between options
351 * with ' ' and all ':' with '=' and you must use
352 * '1' in place of 'Y' and '0' in place of 'N'.
354 * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
355 * max_ids=127 rescan=0 virt_ctr=0 hdr_channel=0 shared_access=0
356 * probe_eisa_isa=0 force_dma32=0"
357 * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
360 /* The meaning of the Scsi_Pointer members in this driver is as follows:
361 * ptr: Chaining
362 * this_residual: Command priority
363 * buffer: phys. DMA sense buffer
364 * dma_handle: phys. DMA buffer (kernel >= 2.4.0)
365 * buffers_residual: Timeout value
366 * Status: Command status (gdth_do_cmd()), DMA mem. mappings
367 * Message: Additional info (gdth_do_cmd()), DMA direction
368 * have_data_in: Flag for gdth_wait_completion()
369 * sent_command: Opcode special command
370 * phase: Service/parameter/return code special command
374 /* interrupt coalescing */
375 /* #define INT_COAL */
377 /* statistics */
378 #define GDTH_STATISTICS
380 #include <linux/module.h>
382 #include <linux/version.h>
383 #include <linux/kernel.h>
384 #include <linux/types.h>
385 #include <linux/pci.h>
386 #include <linux/string.h>
387 #include <linux/ctype.h>
388 #include <linux/ioport.h>
389 #include <linux/delay.h>
390 #include <linux/interrupt.h>
391 #include <linux/in.h>
392 #include <linux/proc_fs.h>
393 #include <linux/time.h>
394 #include <linux/timer.h>
395 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,6)
396 #include <linux/dma-mapping.h>
397 #else
398 #define DMA_32BIT_MASK 0x00000000ffffffffULL
399 #define DMA_64BIT_MASK 0xffffffffffffffffULL
400 #endif
402 #ifdef GDTH_RTC
403 #include <linux/mc146818rtc.h>
404 #endif
405 #include <linux/reboot.h>
407 #include <asm/dma.h>
408 #include <asm/system.h>
409 #include <asm/io.h>
410 #include <asm/uaccess.h>
411 #include <linux/spinlock.h>
412 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
413 #include <linux/blkdev.h>
414 #else
415 #include <linux/blk.h>
416 #include "sd.h"
417 #endif
419 #include "scsi.h"
420 #include <scsi/scsi_host.h>
421 #include "gdth_kcompat.h"
422 #include "gdth.h"
424 static void gdth_delay(int milliseconds);
425 static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs);
426 static irqreturn_t gdth_interrupt(int irq, void *dev_id);
427 static int gdth_sync_event(int hanum,int service,unchar index,Scsi_Cmnd *scp);
428 static int gdth_async_event(int hanum);
429 static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
431 static void gdth_putq(int hanum,Scsi_Cmnd *scp,unchar priority);
432 static void gdth_next(int hanum);
433 static int gdth_fill_raw_cmd(int hanum,Scsi_Cmnd *scp,unchar b);
434 static int gdth_special_cmd(int hanum,Scsi_Cmnd *scp);
435 static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source,
436 ushort idx, gdth_evt_data *evt);
437 static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
438 static void gdth_readapp_event(gdth_ha_str *ha, unchar application,
439 gdth_evt_str *estr);
440 static void gdth_clear_events(void);
442 static void gdth_copy_internal_data(int hanum,Scsi_Cmnd *scp,
443 char *buffer,ushort count);
444 static int gdth_internal_cache_cmd(int hanum,Scsi_Cmnd *scp);
445 static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive);
447 static int gdth_search_eisa(ushort eisa_adr);
448 static int gdth_search_isa(ulong32 bios_adr);
449 static int gdth_search_pci(gdth_pci_str *pcistr);
450 static void gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt,
451 ushort vendor, ushort dev);
452 static void gdth_sort_pci(gdth_pci_str *pcistr, int cnt);
453 static int gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha);
454 static int gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha);
455 static int gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha);
457 static void gdth_enable_int(int hanum);
458 static int gdth_get_status(unchar *pIStatus,int irq);
459 static int gdth_test_busy(int hanum);
460 static int gdth_get_cmd_index(int hanum);
461 static void gdth_release_event(int hanum);
462 static int gdth_wait(int hanum,int index,ulong32 time);
463 static int gdth_internal_cmd(int hanum,unchar service,ushort opcode,ulong32 p1,
464 ulong64 p2,ulong64 p3);
465 static int gdth_search_drives(int hanum);
466 static int gdth_analyse_hdrive(int hanum, ushort hdrive);
468 static const char *gdth_ctr_name(int hanum);
470 static int gdth_open(struct inode *inode, struct file *filep);
471 static int gdth_close(struct inode *inode, struct file *filep);
472 static int gdth_ioctl(struct inode *inode, struct file *filep,
473 unsigned int cmd, unsigned long arg);
475 static void gdth_flush(int hanum);
476 static int gdth_halt(struct notifier_block *nb, ulong event, void *buf);
477 static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *));
478 static void gdth_scsi_done(struct scsi_cmnd *scp);
480 #ifdef DEBUG_GDTH
481 static unchar DebugState = DEBUG_GDTH;
483 #ifdef __SERIAL__
484 #define MAX_SERBUF 160
485 static void ser_init(void);
486 static void ser_puts(char *str);
487 static void ser_putc(char c);
488 static int ser_printk(const char *fmt, ...);
489 static char strbuf[MAX_SERBUF+1];
490 #ifdef __COM2__
491 #define COM_BASE 0x2f8
492 #else
493 #define COM_BASE 0x3f8
494 #endif
/* Initialize the debug UART at COM_BASE: program the baud-rate divisor
 * and line format, then emit "I " as a visible init marker. */
static void ser_init()
{
    unsigned port=COM_BASE;

    outb(0x80,port+3);          /* LCR: set DLAB to expose the divisor latch */
    outb(0,port+1);             /* divisor high byte = 0 */
    /* 19200 Baud, if 9600: outb(12,port) */
    outb(6, port);              /* divisor low byte = 6 -> 19200 baud */
    outb(3,port+3);             /* LCR: clear DLAB; 8 data bits, no parity, 1 stop */
    outb(0,port+1);             /* IER: disable UART interrupts (polled output) */

    ser_putc('I');
    ser_putc(' ');
}
/* Send a NUL-terminated string to the debug UART.
 * The port is (re)initialized on every call before transmitting. */
static void ser_puts(char *str)
{
    ser_init();
    while (*str)
        ser_putc(*str++);
}
520 static void ser_putc(char c)
522 unsigned port=COM_BASE;
524 while ((inb(port+5) & 0x20)==0);
525 outb(c,port);
526 if (c==0x0a)
528 while ((inb(port+5) & 0x20)==0);
529 outb(0x0d,port);
533 static int ser_printk(const char *fmt, ...)
535 va_list args;
536 int i;
538 va_start(args,fmt);
539 i = vsprintf(strbuf,fmt,args);
540 ser_puts(strbuf);
541 va_end(args);
542 return i;
545 #define TRACE(a) {if (DebugState==1) {ser_printk a;}}
546 #define TRACE2(a) {if (DebugState==1 || DebugState==2) {ser_printk a;}}
547 #define TRACE3(a) {if (DebugState!=0) {ser_printk a;}}
549 #else /* !__SERIAL__ */
550 #define TRACE(a) {if (DebugState==1) {printk a;}}
551 #define TRACE2(a) {if (DebugState==1 || DebugState==2) {printk a;}}
552 #define TRACE3(a) {if (DebugState!=0) {printk a;}}
553 #endif
555 #else /* !DEBUG */
556 #define TRACE(a)
557 #define TRACE2(a)
558 #define TRACE3(a)
559 #endif
561 #ifdef GDTH_STATISTICS
562 static ulong32 max_rq=0, max_index=0, max_sg=0;
563 #ifdef INT_COAL
564 static ulong32 max_int_coal=0;
565 #endif
566 static ulong32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
567 static struct timer_list gdth_timer;
568 #endif
570 #define PTR2USHORT(a) (ushort)(ulong)(a)
571 #define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
572 #define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t))
574 #define NUMDATA(a) ( (gdth_num_str *)((a)->hostdata))
575 #define HADATA(a) (&((gdth_ext_str *)((a)->hostdata))->haext)
576 #define CMDDATA(a) (&((gdth_ext_str *)((a)->hostdata))->cmdext)
578 #define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b))
580 #define gdth_readb(addr) readb(addr)
581 #define gdth_readw(addr) readw(addr)
582 #define gdth_readl(addr) readl(addr)
583 #define gdth_writeb(b,addr) writeb((b),(addr))
584 #define gdth_writew(b,addr) writew((b),(addr))
585 #define gdth_writel(b,addr) writel((b),(addr))
587 static unchar gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */
588 static unchar gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */
589 static unchar gdth_polling; /* polling if TRUE */
590 static unchar gdth_from_wait = FALSE; /* gdth_wait() */
591 static int wait_index,wait_hanum; /* gdth_wait() */
592 static int gdth_ctr_count = 0; /* controller count */
593 static int gdth_ctr_vcount = 0; /* virt. ctr. count */
594 static int gdth_ctr_released = 0; /* gdth_release() */
595 static struct Scsi_Host *gdth_ctr_tab[MAXHA]; /* controller table */
596 static struct Scsi_Host *gdth_ctr_vtab[MAXHA*MAXBUS]; /* virt. ctr. table */
597 static unchar gdth_write_through = FALSE; /* write through */
598 static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
599 static int elastidx;
600 static int eoldidx;
601 static int major;
603 #define DIN 1 /* IN data direction */
604 #define DOU 2 /* OUT data direction */
605 #define DNO DIN /* no data transfer */
606 #define DUN DIN /* unknown data direction */
/* Data-transfer direction per SCSI opcode (index = opcode 0x00..0xff).
 * DIN/DOU/DNO/DUN as defined above; consulted when building raw
 * commands to choose the DMA mapping direction for the data buffer.
 * Opcodes the driver does not recognize default to DUN (unknown). */
static unchar gdth_direction_tab[0x100] = {
    DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
    DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
    DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
    DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
    DOU,DOU,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DUN,DUN,
    DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN,
    DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
    DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
    DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
    DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,
    DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DOU,DUN,DNO,DUN,DOU,DOU,
    DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
    DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
    DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
    DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
    DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
};
626 /* LILO and modprobe/insmod parameters */
627 /* IRQ list for GDT3000/3020 EISA controllers */
628 static int irq[MAXHA] __initdata =
629 {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
630 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
631 /* disable driver flag */
632 static int disable __initdata = 0;
633 /* reserve flag */
634 static int reserve_mode = 1;
635 /* reserve list */
636 static int reserve_list[MAX_RES_ARGS] =
637 {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
638 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
639 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
640 /* scan order for PCI controllers */
641 static int reverse_scan = 0;
642 /* virtual channel for the host drives */
643 static int hdr_channel = 0;
644 /* max. IDs per channel */
645 static int max_ids = MAXID;
646 /* rescan all IDs */
647 static int rescan = 0;
648 /* map channels to virtual controllers */
649 static int virt_ctr = 0;
650 /* shared access */
651 static int shared_access = 1;
652 /* enable support for EISA and ISA controllers */
653 static int probe_eisa_isa = 0;
654 /* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */
655 static int force_dma32 = 0;
657 /* parameters for modprobe/insmod */
658 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
659 module_param_array(irq, int, NULL, 0);
660 module_param(disable, int, 0);
661 module_param(reserve_mode, int, 0);
662 module_param_array(reserve_list, int, NULL, 0);
663 module_param(reverse_scan, int, 0);
664 module_param(hdr_channel, int, 0);
665 module_param(max_ids, int, 0);
666 module_param(rescan, int, 0);
667 module_param(virt_ctr, int, 0);
668 module_param(shared_access, int, 0);
669 module_param(probe_eisa_isa, int, 0);
670 module_param(force_dma32, int, 0);
671 #else
672 MODULE_PARM(irq, "i");
673 MODULE_PARM(disable, "i");
674 MODULE_PARM(reserve_mode, "i");
675 MODULE_PARM(reserve_list, "4-" __MODULE_STRING(MAX_RES_ARGS) "i");
676 MODULE_PARM(reverse_scan, "i");
677 MODULE_PARM(hdr_channel, "i");
678 MODULE_PARM(max_ids, "i");
679 MODULE_PARM(rescan, "i");
680 MODULE_PARM(virt_ctr, "i");
681 MODULE_PARM(shared_access, "i");
682 MODULE_PARM(probe_eisa_isa, "i");
683 MODULE_PARM(force_dma32, "i");
684 #endif
685 MODULE_AUTHOR("Achim Leubner");
686 MODULE_LICENSE("GPL");
688 /* ioctl interface */
689 static const struct file_operations gdth_fops = {
690 .ioctl = gdth_ioctl,
691 .open = gdth_open,
692 .release = gdth_close,
695 #include "gdth_proc.h"
696 #include "gdth_proc.c"
698 /* notifier block to get a notify on system shutdown/halt/reboot */
699 static struct notifier_block gdth_notifier = {
700 gdth_halt, NULL, 0
702 static int notifier_disabled = 0;
/* Sleep-free delay helper: a zero argument means "shortest possible
 * pause" and busy-waits one microsecond; otherwise spin for the given
 * number of milliseconds. */
static void gdth_delay(int milliseconds)
{
    if (milliseconds)
        mdelay(milliseconds);
    else
        udelay(1);
}
713 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
714 static void gdth_scsi_done(struct scsi_cmnd *scp)
716 TRACE2(("gdth_scsi_done()\n"));
718 if (scp->request)
719 complete((struct completion *)scp->request);
/* Issue an internal driver command (gdtcmd) synchronously through the
 * normal queuecommand path (kernel >= 2.6 variant).
 * A Scsi_Cmnd is faked up by hand rather than obtained from the
 * midlayer; its request pointer is (ab)used to carry an on-stack
 * completion that gdth_scsi_done() signals.
 * Returns the controller status word (SCp.Status) or -ENOMEM on
 * allocation failure; on return, *info (if non-NULL) receives the
 * additional info word (SCp.Message). */
int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
                   int timeout, u32 *info)
{
    Scsi_Cmnd *scp;
    DECLARE_COMPLETION_ONSTACK(wait);
    int rval;

    scp = kzalloc(sizeof(*scp), GFP_KERNEL);
    if (!scp)
        return -ENOMEM;

    scp->sense_buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
    if (!scp->sense_buffer) {
        kfree(scp);
        return -ENOMEM;
    }

    scp->device = sdev;
    /* use request field to save the ptr. to completion struct. */
    scp->request = (struct request *)&wait;
    scp->timeout_per_command = timeout*HZ;
    scp->request_buffer = gdtcmd;   /* internal command payload */
    scp->cmd_len = 12;
    memcpy(scp->cmnd, cmnd, 12);
    scp->SCp.this_residual = IOCTL_PRI;    /* priority */
    scp->done = gdth_scsi_done; /* some fn. test this */
    gdth_queuecommand(scp, gdth_scsi_done);
    wait_for_completion(&wait);

    rval = scp->SCp.Status;
    if (info)
        *info = scp->SCp.Message;
    kfree(scp->sense_buffer);
    kfree(scp);
    return rval;
}
758 #else
/* Completion callback for internal commands (kernel < 2.6 variant):
 * mark the embedded request done and wake the waiter, if any. */
static void gdth_scsi_done(Scsi_Cmnd *scp)
{
    TRACE2(("gdth_scsi_done()\n"));

    scp->request.rq_status = RQ_SCSI_DONE;
    if (scp->request.waiting)
        complete(scp->request.waiting);
}
/* Kernel < 2.6 variant of the synchronous internal-command helper:
 * obtains a command via scsi_allocate_device(), issues it with
 * scsi_do_cmd() and sleeps on an on-stack completion until
 * gdth_scsi_done() runs.
 * Returns SCp.Status or -ENOMEM; on return, *info (if non-NULL)
 * receives the additional info word (SCp.Message). */
int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
                   int timeout, u32 *info)
{
    Scsi_Cmnd *scp = scsi_allocate_device(sdev, 1, FALSE);
    /* no data buffer at all when gdtcmd is NULL */
    unsigned bufflen = gdtcmd ? sizeof(gdth_cmd_str) : 0;
    DECLARE_COMPLETION_ONSTACK(wait);
    int rval;

    if (!scp)
        return -ENOMEM;
    scp->cmd_len = 12;
    scp->use_sg = 0;
    scp->SCp.this_residual = IOCTL_PRI;    /* priority */
    scp->request.rq_status = RQ_SCSI_BUSY;
    scp->request.waiting = &wait;          /* woken by gdth_scsi_done() */
    scsi_do_cmd(scp, cmnd, gdtcmd, bufflen, gdth_scsi_done, timeout*HZ, 1);
    wait_for_completion(&wait);

    rval = scp->SCp.Status;
    if (info)
        *info = scp->SCp.Message;

    scsi_release_command(scp);
    return rval;
}
793 #endif
795 int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
796 int timeout, u32 *info)
798 struct scsi_device *sdev = scsi_get_host_dev(shost);
799 int rval = __gdth_execute(sdev, gdtcmd, cmnd, timeout, info);
801 scsi_free_host_dev(sdev);
802 return rval;
805 static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs)
807 *cyls = size /HEADS/SECS;
808 if (*cyls <= MAXCYLS) {
809 *heads = HEADS;
810 *secs = SECS;
811 } else { /* too high for 64*32 */
812 *cyls = size /MEDHEADS/MEDSECS;
813 if (*cyls <= MAXCYLS) {
814 *heads = MEDHEADS;
815 *secs = MEDSECS;
816 } else { /* too high for 127*63 */
817 *cyls = size /BIGHEADS/BIGSECS;
818 *heads = BIGHEADS;
819 *secs = BIGSECS;
824 /* controller search and initialization functions */
826 static int __init gdth_search_eisa(ushort eisa_adr)
828 ulong32 id;
830 TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
831 id = inl(eisa_adr+ID0REG);
832 if (id == GDT3A_ID || id == GDT3B_ID) { /* GDT3000A or GDT3000B */
833 if ((inb(eisa_adr+EISAREG) & 8) == 0)
834 return 0; /* not EISA configured */
835 return 1;
837 if (id == GDT3_ID) /* GDT3000 */
838 return 1;
840 return 0;
844 static int __init gdth_search_isa(ulong32 bios_adr)
846 void __iomem *addr;
847 ulong32 id;
849 TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr));
850 if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(ulong32))) != NULL) {
851 id = gdth_readl(addr);
852 iounmap(addr);
853 if (id == GDT2_ID) /* GDT2000 */
854 return 1;
856 return 0;
860 static int __init gdth_search_pci(gdth_pci_str *pcistr)
862 ushort device, cnt;
864 TRACE(("gdth_search_pci()\n"));
866 cnt = 0;
867 for (device = 0; device <= PCI_DEVICE_ID_VORTEX_GDT6555; ++device)
868 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, device);
869 for (device = PCI_DEVICE_ID_VORTEX_GDT6x17RP;
870 device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP; ++device)
871 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX, device);
872 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX,
873 PCI_DEVICE_ID_VORTEX_GDTNEWRX);
874 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_VORTEX,
875 PCI_DEVICE_ID_VORTEX_GDTNEWRX2);
876 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_INTEL,
877 PCI_DEVICE_ID_INTEL_SRC);
878 gdth_search_dev(pcistr, &cnt, PCI_VENDOR_ID_INTEL,
879 PCI_DEVICE_ID_INTEL_SRC_XSCALE);
880 return cnt;
883 /* Vortex only makes RAID controllers.
884 * We do not really want to specify all 550 ids here, so wildcard match.
886 static struct pci_device_id gdthtable[] __attribute_used__ = {
887 {PCI_VENDOR_ID_VORTEX,PCI_ANY_ID,PCI_ANY_ID, PCI_ANY_ID},
888 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC,PCI_ANY_ID,PCI_ANY_ID},
889 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC_XSCALE,PCI_ANY_ID,PCI_ANY_ID},
892 MODULE_DEVICE_TABLE(pci,gdthtable);
/* Find every PCI device matching vendor/device and append it to
 * pcistr[] (at most MAXHA entries total), recording bus/devfn, IRQ and
 * the BAR start addresses that gdth_init_pci() needs later.
 * BAR layout (per the checks below): GDT6000/B and MPR boards expose
 * DPMEM in BAR0; the GDT6110/6120 class uses BAR2 (DPMEM),
 * BAR1 (I/O ports) and BAR0 (memory-mapped I/O). */
static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt,
                                   ushort vendor, ushort device)
{
    ulong base0, base1, base2;
    struct pci_dev *pdev;

    TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
           *cnt, vendor, device));

    pdev = NULL;
    while ((pdev = pci_find_device(vendor, device, pdev))
           != NULL) {
        if (pci_enable_device(pdev))    /* skip devices that fail to enable */
            continue;
        if (*cnt >= MAXHA)              /* controller table full */
            return;
        /* GDT PCI controller found, resources are already in pdev */
        pcistr[*cnt].pdev = pdev;
        pcistr[*cnt].vendor_id = vendor;
        pcistr[*cnt].device_id = device;
        pcistr[*cnt].subdevice_id = pdev->subsystem_device;
        pcistr[*cnt].bus = pdev->bus->number;
        pcistr[*cnt].device_fn = pdev->devfn;
        pcistr[*cnt].irq = pdev->irq;
        /* resource flags tell us whether each BAR is MEM or IO */
        base0 = pci_resource_flags(pdev, 0);
        base1 = pci_resource_flags(pdev, 1);
        base2 = pci_resource_flags(pdev, 2);
        if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B ||   /* GDT6000/B */
            device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) {  /* MPR */
            if (!(base0 & IORESOURCE_MEM))
                continue;
            pcistr[*cnt].dpmem = pci_resource_start(pdev, 0);
        } else {                                  /* GDT6110, GDT6120, .. */
            if (!(base0 & IORESOURCE_MEM) ||
                !(base2 & IORESOURCE_MEM) ||
                !(base1 & IORESOURCE_IO))
                continue;
            pcistr[*cnt].dpmem = pci_resource_start(pdev, 2);
            pcistr[*cnt].io_mm = pci_resource_start(pdev, 0);
            pcistr[*cnt].io = pci_resource_start(pdev, 1);
        }
        TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
                pcistr[*cnt].bus, PCI_SLOT(pcistr[*cnt].device_fn),
                pcistr[*cnt].irq, pcistr[*cnt].dpmem));
        (*cnt)++;
    }
}
943 static void __init gdth_sort_pci(gdth_pci_str *pcistr, int cnt)
945 gdth_pci_str temp;
946 int i, changed;
948 TRACE(("gdth_sort_pci() cnt %d\n",cnt));
949 if (cnt == 0)
950 return;
952 do {
953 changed = FALSE;
954 for (i = 0; i < cnt-1; ++i) {
955 if (!reverse_scan) {
956 if ((pcistr[i].bus > pcistr[i+1].bus) ||
957 (pcistr[i].bus == pcistr[i+1].bus &&
958 PCI_SLOT(pcistr[i].device_fn) >
959 PCI_SLOT(pcistr[i+1].device_fn))) {
960 temp = pcistr[i];
961 pcistr[i] = pcistr[i+1];
962 pcistr[i+1] = temp;
963 changed = TRUE;
965 } else {
966 if ((pcistr[i].bus < pcistr[i+1].bus) ||
967 (pcistr[i].bus == pcistr[i+1].bus &&
968 PCI_SLOT(pcistr[i].device_fn) <
969 PCI_SLOT(pcistr[i+1].device_fn))) {
970 temp = pcistr[i];
971 pcistr[i] = pcistr[i+1];
972 pcistr[i+1] = temp;
973 changed = TRUE;
977 } while (changed);
/* Initialize a GDT EISA controller at I/O base eisa_adr.
 * Performs the DEINIT handshake, verifies the firmware protocol version,
 * clears the mailbox, and determines the IRQ (from the board for GDT3000/
 * 3020, or from the EISA config register otherwise; falls back to the
 * "irq=" command-line table if the board reports IRQ 0).
 * Returns 1 on success, 0 on any failure (with a printk). */
static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
{
    ulong32 retries,id;
    unchar prot_ver,eisacf,i,irq_found;

    TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));

    /* disable board interrupts, deinitialize services */
    outb(0xff,eisa_adr+EDOORREG);
    outb(0x00,eisa_adr+EDENABREG);
    outb(0x00,eisa_adr+EINTENABREG);

    /* ring the local doorbell and poll for the DEINIT acknowledge (0xff) */
    outb(0xff,eisa_adr+LDOORREG);
    retries = INIT_RETRIES;
    gdth_delay(20);
    while (inb(eisa_adr+EDOORREG) != 0xff) {
        if (--retries == 0) {
            printk("GDT-EISA: Initialization error (DEINIT failed)\n");
            return 0;
        }
        gdth_delay(1);
        TRACE2(("wait for DEINIT: retries=%d\n",retries));
    }
    prot_ver = inb(eisa_adr+MAILBOXREG);
    outb(0xff,eisa_adr+EDOORREG);
    if (prot_ver != PROTOCOL_VERSION) {
        printk("GDT-EISA: Illegal protocol version\n");
        return 0;
    }
    ha->bmic = eisa_adr;
    ha->brd_phys = (ulong32)eisa_adr >> 12;

    /* clear the 16-byte mailbox */
    outl(0,eisa_adr+MAILBOXREG);
    outl(0,eisa_adr+MAILBOXREG+4);
    outl(0,eisa_adr+MAILBOXREG+8);
    outl(0,eisa_adr+MAILBOXREG+12);

    /* detect IRQ */
    if ((id = inl(eisa_adr+ID0REG)) == GDT3_ID) {
        /* GDT3000/3020: ask the firmware for its IRQ via the mailbox */
        ha->oem_id = OEM_ID_ICP;
        ha->type = GDT_EISA;
        ha->stype = id;
        outl(1,eisa_adr+MAILBOXREG+8);
        outb(0xfe,eisa_adr+LDOORREG);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (inb(eisa_adr+EDOORREG) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-EISA: Initialization error (get IRQ failed)\n");
                return 0;
            }
            gdth_delay(1);
        }
        ha->irq = inb(eisa_adr+MAILBOXREG);
        outb(0xff,eisa_adr+EDOORREG);
        TRACE2(("GDT3000/3020: IRQ=%d\n",ha->irq));
        /* check the result */
        if (ha->irq == 0) {
            /* board did not report an IRQ: fall back to "irq=" parameters */
            TRACE2(("Unknown IRQ, use IRQ table from cmd line !\n"));
            for (i = 0, irq_found = FALSE;
                 i < MAXHA && irq[i] != 0xff; ++i) {
                if (irq[i]==10 || irq[i]==11 || irq[i]==12 || irq[i]==14) {
                    irq_found = TRUE;
                    break;
                }
            }
            if (irq_found) {
                ha->irq = irq[i];
                irq[i] = 0;          /* consume this table entry */
                printk("GDT-EISA: Can not detect controller IRQ,\n");
                printk("Use IRQ setting from command line (IRQ = %d)\n",
                       ha->irq);
            } else {
                printk("GDT-EISA: Initialization error (unknown IRQ), Enable\n");
                printk("the controller BIOS or use command line parameters\n");
                return 0;
            }
        }
    } else {
        /* other EISA boards: IRQ encoded in the EISA configuration register */
        eisacf = inb(eisa_adr+EISAREG) & 7;
        if (eisacf > 4)              /* level triggered */
            eisacf -= 4;
        ha->irq = gdth_irq_tab[eisacf];
        ha->oem_id = OEM_ID_ICP;
        ha->type = GDT_EISA;
        ha->stype = id;
    }

    ha->dma64_support = 0;           /* EISA boards never do 64-bit DMA */
    return 1;
}
/* Initialize a GDT ISA controller whose dual-ported memory sits at
 * physical address bios_adr.  Maps the DPMEM, verifies it is writable,
 * reads DRQ/IRQ settings from the board, runs the DEINIT handshake,
 * checks the protocol version and issues the special BIOS command (0xfe).
 * Returns 1 on success, 0 on failure (DPMEM is unmapped on all error paths). */
static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha)
{
    register gdt2_dpram_str __iomem *dp2_ptr;
    int i;
    unchar irq_drq,prot_ver;
    ulong32 retries;

    TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr));

    ha->brd = ioremap(bios_adr, sizeof(gdt2_dpram_str));
    if (ha->brd == NULL) {
        printk("GDT-ISA: Initialization error (DPMEM remap error)\n");
        return 0;
    }
    dp2_ptr = ha->brd;
    gdth_writeb(1, &dp2_ptr->io.memlock); /* switch off write protection */
    /* reset interface area */
    memset_io(&dp2_ptr->u, 0, sizeof(dp2_ptr->u));
    if (gdth_readl(&dp2_ptr->u) != 0) {
        printk("GDT-ISA: Initialization error (DPMEM write error)\n");
        iounmap(ha->brd);
        return 0;
    }

    /* disable board interrupts, read DRQ and IRQ */
    gdth_writeb(0xff, &dp2_ptr->io.irqdel);
    gdth_writeb(0x00, &dp2_ptr->io.irqen);
    gdth_writeb(0x00, &dp2_ptr->u.ic.S_Status);
    gdth_writeb(0x00, &dp2_ptr->u.ic.Cmd_Index);

    /* low 3 bits of io.rq: one-hot DRQ selector -> index into gdth_drq_tab */
    irq_drq = gdth_readb(&dp2_ptr->io.rq);
    for (i=0; i<3; ++i) {
        if ((irq_drq & 1)==0)
            break;
        irq_drq >>= 1;
    }
    ha->drq = gdth_drq_tab[i];

    /* bits 3.. of io.rq: one-hot IRQ selector -> index into gdth_irq_tab */
    irq_drq = gdth_readb(&dp2_ptr->io.rq) >> 3;
    for (i=1; i<5; ++i) {
        if ((irq_drq & 1)==0)
            break;
        irq_drq >>= 1;
    }
    ha->irq = gdth_irq_tab[i];

    /* deinitialize services */
    gdth_writel(bios_adr, &dp2_ptr->u.ic.S_Info[0]);
    gdth_writeb(0xff, &dp2_ptr->u.ic.S_Cmd_Indx);
    gdth_writeb(0, &dp2_ptr->io.event);
    retries = INIT_RETRIES;
    gdth_delay(20);
    while (gdth_readb(&dp2_ptr->u.ic.S_Status) != 0xff) {
        if (--retries == 0) {
            printk("GDT-ISA: Initialization error (DEINIT failed)\n");
            iounmap(ha->brd);
            return 0;
        }
        gdth_delay(1);
    }
    prot_ver = (unchar)gdth_readl(&dp2_ptr->u.ic.S_Info[0]);
    gdth_writeb(0, &dp2_ptr->u.ic.Status);
    gdth_writeb(0xff, &dp2_ptr->io.irqdel);
    if (prot_ver != PROTOCOL_VERSION) {
        printk("GDT-ISA: Illegal protocol version\n");
        iounmap(ha->brd);
        return 0;
    }

    ha->oem_id = OEM_ID_ICP;
    ha->type = GDT_ISA;
    ha->ic_all_size = sizeof(dp2_ptr->u);
    ha->stype= GDT2_ID;
    ha->brd_phys = bios_adr >> 4;

    /* special request to controller BIOS (command index 0xfe) */
    gdth_writel(0x00, &dp2_ptr->u.ic.S_Info[0]);
    gdth_writel(0x00, &dp2_ptr->u.ic.S_Info[1]);
    gdth_writel(0x01, &dp2_ptr->u.ic.S_Info[2]);
    gdth_writel(0x00, &dp2_ptr->u.ic.S_Info[3]);
    gdth_writeb(0xfe, &dp2_ptr->u.ic.S_Cmd_Indx);
    gdth_writeb(0, &dp2_ptr->io.event);
    retries = INIT_RETRIES;
    gdth_delay(20);
    while (gdth_readb(&dp2_ptr->u.ic.S_Status) != 0xfe) {
        if (--retries == 0) {
            printk("GDT-ISA: Initialization error\n");
            iounmap(ha->brd);
            return 0;
        }
        gdth_delay(1);
    }
    gdth_writeb(0, &dp2_ptr->u.ic.Status);
    gdth_writeb(0xff, &dp2_ptr->io.irqdel);

    ha->dma64_support = 0;           /* ISA boards never do 64-bit DMA */
    return 1;
}
/* Initialize a GDT PCI controller described by *pcistr, filling in *ha.
 * Three hardware families are handled, selected by the PCI device ID:
 *   - GDT6000/B (old):     DPMEM in BAR0, doorbells inside the DPRAM
 *   - GDT6110..6555 (PLX): DPMEM in BAR2, doorbells in PLX I/O registers
 *   - MPR (i960-based):    DPMEM in BAR0, doorbells in i960 registers;
 *                          additionally probes firmware for 64-bit DMA
 * In every branch: map DPMEM (relocating it into C8000h..E7FFFFh if the
 * configured address is shadowed), run the DEINIT handshake, verify the
 * protocol version, and issue the special BIOS command (index 0xfe).
 * Returns 1 on success, 0 on failure (DPMEM unmapped on error paths). */
static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha)
{
    register gdt6_dpram_str __iomem *dp6_ptr;
    register gdt6c_dpram_str __iomem *dp6c_ptr;
    register gdt6m_dpram_str __iomem *dp6m_ptr;
    ulong32 retries;
    unchar prot_ver;
    ushort command;
    int i, found = FALSE;

    TRACE(("gdth_init_pci()\n"));

    if (pcistr->vendor_id == PCI_VENDOR_ID_INTEL)
        ha->oem_id = OEM_ID_INTEL;
    else
        ha->oem_id = OEM_ID_ICP;
    ha->brd_phys = (pcistr->bus << 8) | (pcistr->device_fn & 0xf8);
    ha->stype = (ulong32)pcistr->device_id;
    ha->subdevice_id = pcistr->subdevice_id;
    ha->irq = pcistr->irq;
    ha->pdev = pcistr->pdev;

    if (ha->stype <= PCI_DEVICE_ID_VORTEX_GDT6000B) {  /* GDT6000/B */
        TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            return 0;
        }
        /* check and reset interface area */
        dp6_ptr = ha->brd;
        gdth_writel(DPMEM_MAGIC, &dp6_ptr->u);
        if (gdth_readl(&dp6_ptr->u) != DPMEM_MAGIC) {
            /* configured DPMEM address unusable: search the legacy
             * C8000h..E7FFFFh window for a free 16K-aligned slot and
             * reprogram BAR0 to it */
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(ushort));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (gdth_readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_old() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pcistr->pdev,
                                       PCI_BASE_ADDRESS_0, i);
                ha->brd = ioremap(i, sizeof(gdt6_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6_ptr = ha->brd;
                gdth_writel(DPMEM_MAGIC, &dp6_ptr->u);
                if (gdth_readl(&dp6_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6_ptr->u, 0, sizeof(dp6_ptr->u));
        if (gdth_readl(&dp6_ptr->u) != 0) {
            printk("GDT-PCI: Initialization error (DPMEM write error)\n");
            iounmap(ha->brd);
            return 0;
        }

        /* disable board interrupts, deinit services */
        gdth_writeb(0xff, &dp6_ptr->io.irqdel);
        gdth_writeb(0x00, &dp6_ptr->io.irqen);
        gdth_writeb(0x00, &dp6_ptr->u.ic.S_Status);
        gdth_writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);

        /* DEINIT handshake: command index 0xff, wait for 0xff status */
        gdth_writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
        gdth_writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
        gdth_writeb(0, &dp6_ptr->io.event);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (gdth_readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (unchar)gdth_readl(&dp6_ptr->u.ic.S_Info[0]);
        gdth_writeb(0, &dp6_ptr->u.ic.S_Status);
        gdth_writeb(0xff, &dp6_ptr->io.irqdel);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCI;
        ha->ic_all_size = sizeof(dp6_ptr->u);

        /* special command to controller BIOS (index 0xfe) */
        gdth_writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
        gdth_writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
        gdth_writel(0x00, &dp6_ptr->u.ic.S_Info[2]);
        gdth_writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
        gdth_writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
        gdth_writeb(0, &dp6_ptr->io.event);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (gdth_readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        gdth_writeb(0, &dp6_ptr->u.ic.S_Status);
        gdth_writeb(0xff, &dp6_ptr->io.irqdel);

        ha->dma64_support = 0;       /* old boards never do 64-bit DMA */

    } else if (ha->stype <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
        /* PLX-bridged family: doorbells/semaphores live in the PLX I/O
         * register block (pcistr->io), DPMEM in BAR2 */
        ha->plx = (gdt6c_plx_regs *)pcistr->io;
        TRACE2(("init_pci_new() dpmem %lx irq %d\n",
                pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            iounmap(ha->brd);    /* ha->brd is NULL here: harmless no-op */
            return 0;
        }
        /* check and reset interface area */
        dp6c_ptr = ha->brd;
        gdth_writel(DPMEM_MAGIC, &dp6c_ptr->u);
        if (gdth_readl(&dp6c_ptr->u) != DPMEM_MAGIC) {
            /* relocate DPMEM into the legacy window via BAR2 */
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(ushort));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (gdth_readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_plx() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pcistr->pdev,
                                       PCI_BASE_ADDRESS_2, i);
                ha->brd = ioremap(i, sizeof(gdt6c_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6c_ptr = ha->brd;
                gdth_writel(DPMEM_MAGIC, &dp6c_ptr->u);
                if (gdth_readl(&dp6c_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6c_ptr->u, 0, sizeof(dp6c_ptr->u));
        if (gdth_readl(&dp6c_ptr->u) != 0) {
            printk("GDT-PCI: Initialization error (DPMEM write error)\n");
            iounmap(ha->brd);
            return 0;
        }

        /* disable board interrupts, deinit services */
        outb(0x00,PTR2USHORT(&ha->plx->control1));
        outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));

        gdth_writeb(0x00, &dp6c_ptr->u.ic.S_Status);
        gdth_writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);

        gdth_writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
        gdth_writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);

        /* ring the PLX local doorbell, wait for DEINIT acknowledge */
        outb(1,PTR2USHORT(&ha->plx->ldoor_reg));

        retries = INIT_RETRIES;
        gdth_delay(20);
        while (gdth_readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (unchar)gdth_readl(&dp6c_ptr->u.ic.S_Info[0]);
        gdth_writeb(0, &dp6c_ptr->u.ic.Status);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCINEW;
        ha->ic_all_size = sizeof(dp6c_ptr->u);

        /* special command to controller BIOS (index 0xfe) */
        gdth_writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
        gdth_writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
        gdth_writel(0x00, &dp6c_ptr->u.ic.S_Info[2]);
        gdth_writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
        gdth_writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);

        outb(1,PTR2USHORT(&ha->plx->ldoor_reg));

        retries = INIT_RETRIES;
        gdth_delay(20);
        while (gdth_readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        gdth_writeb(0, &dp6c_ptr->u.ic.S_Status);

        ha->dma64_support = 0;       /* PLX boards never do 64-bit DMA */

    } else {                                             /* MPR */
        TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
        ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
        if (ha->brd == NULL) {
            printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
            return 0;
        }

        /* manipulate config. space to enable DPMEM, start RP controller */
        pci_read_config_word(pcistr->pdev, PCI_COMMAND, &command);
        command |= 6;                /* set MEMORY + BUSMASTER enable bits */
        pci_write_config_word(pcistr->pdev, PCI_COMMAND, command);
        /* NOTE(review): relies on pci_resource_start being an lvalue macro
         * in this kernel generation — confirm against the local pci.h */
        if (pci_resource_start(pcistr->pdev, 8) == 1UL)
            pci_resource_start(pcistr->pdev, 8) = 0UL;
        i = 0xFEFF0001UL;
        pci_write_config_dword(pcistr->pdev, PCI_ROM_ADDRESS, i);
        gdth_delay(1);
        pci_write_config_dword(pcistr->pdev, PCI_ROM_ADDRESS,
                               pci_resource_start(pcistr->pdev, 8));

        dp6m_ptr = ha->brd;

        /* Ensure that it is safe to access the non HW portions of DPMEM.
         * Aditional check needed for Xscale based RAID controllers */
        while( ((int)gdth_readb(&dp6m_ptr->i960r.sema0_reg) ) & 3 )
            gdth_delay(1);

        /* check and reset interface area */
        gdth_writel(DPMEM_MAGIC, &dp6m_ptr->u);
        if (gdth_readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
            /* relocate DPMEM into the legacy window via BAR0 */
            printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
                   pcistr->dpmem);
            found = FALSE;
            for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
                iounmap(ha->brd);
                ha->brd = ioremap(i, sizeof(ushort));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                if (gdth_readw(ha->brd) != 0xffff) {
                    TRACE2(("init_pci_mpr() address 0x%x busy\n", i));
                    continue;
                }
                iounmap(ha->brd);
                pci_write_config_dword(pcistr->pdev,
                                       PCI_BASE_ADDRESS_0, i);
                ha->brd = ioremap(i, sizeof(gdt6m_dpram_str));
                if (ha->brd == NULL) {
                    printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
                    return 0;
                }
                dp6m_ptr = ha->brd;
                gdth_writel(DPMEM_MAGIC, &dp6m_ptr->u);
                if (gdth_readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
                    printk("GDT-PCI: Use free address at 0x%x\n", i);
                    found = TRUE;
                    break;
                }
            }
            if (!found) {
                printk("GDT-PCI: No free address found!\n");
                iounmap(ha->brd);
                return 0;
            }
        }
        memset_io(&dp6m_ptr->u, 0, sizeof(dp6m_ptr->u));

        /* disable board interrupts, deinit services */
        gdth_writeb(gdth_readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
                    &dp6m_ptr->i960r.edoor_en_reg);
        gdth_writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
        gdth_writeb(0x00, &dp6m_ptr->u.ic.S_Status);
        gdth_writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);

        gdth_writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
        gdth_writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
        gdth_writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (gdth_readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (unchar)gdth_readl(&dp6m_ptr->u.ic.S_Info[0]);
        gdth_writeb(0, &dp6m_ptr->u.ic.S_Status);
        if (prot_ver != PROTOCOL_VERSION) {
            printk("GDT-PCI: Illegal protocol version\n");
            iounmap(ha->brd);
            return 0;
        }

        ha->type = GDT_PCIMPR;
        ha->ic_all_size = sizeof(dp6m_ptr->u);

        /* special command to controller BIOS (index 0xfe) */
        gdth_writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
        gdth_writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
        gdth_writel(0x00, &dp6m_ptr->u.ic.S_Info[2]);
        gdth_writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
        gdth_writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
        gdth_writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (gdth_readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        gdth_writeb(0, &dp6m_ptr->u.ic.S_Status);

        /* read FW version to detect 64-bit DMA support (command 0xfd) */
        gdth_writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx);
        gdth_writeb(1, &dp6m_ptr->i960r.ldoor_reg);
        retries = INIT_RETRIES;
        gdth_delay(20);
        while (gdth_readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) {
            if (--retries == 0) {
                printk("GDT-PCI: Initialization error (DEINIT failed)\n");
                iounmap(ha->brd);
                return 0;
            }
            gdth_delay(1);
        }
        prot_ver = (unchar)(gdth_readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
        gdth_writeb(0, &dp6m_ptr->u.ic.S_Status);
        if (prot_ver < 0x2b)         /* FW < x.43: no 64-bit DMA support */
            ha->dma64_support = 0;
        else
            ha->dma64_support = 1;
    }

    return 1;
}
1559 /* controller protocol functions */
1561 static void __init gdth_enable_int(int hanum)
1563 gdth_ha_str *ha;
1564 ulong flags;
1565 gdt2_dpram_str __iomem *dp2_ptr;
1566 gdt6_dpram_str __iomem *dp6_ptr;
1567 gdt6m_dpram_str __iomem *dp6m_ptr;
1569 TRACE(("gdth_enable_int() hanum %d\n",hanum));
1570 ha = HADATA(gdth_ctr_tab[hanum]);
1571 spin_lock_irqsave(&ha->smp_lock, flags);
1573 if (ha->type == GDT_EISA) {
1574 outb(0xff, ha->bmic + EDOORREG);
1575 outb(0xff, ha->bmic + EDENABREG);
1576 outb(0x01, ha->bmic + EINTENABREG);
1577 } else if (ha->type == GDT_ISA) {
1578 dp2_ptr = ha->brd;
1579 gdth_writeb(1, &dp2_ptr->io.irqdel);
1580 gdth_writeb(0, &dp2_ptr->u.ic.Cmd_Index);
1581 gdth_writeb(1, &dp2_ptr->io.irqen);
1582 } else if (ha->type == GDT_PCI) {
1583 dp6_ptr = ha->brd;
1584 gdth_writeb(1, &dp6_ptr->io.irqdel);
1585 gdth_writeb(0, &dp6_ptr->u.ic.Cmd_Index);
1586 gdth_writeb(1, &dp6_ptr->io.irqen);
1587 } else if (ha->type == GDT_PCINEW) {
1588 outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
1589 outb(0x03, PTR2USHORT(&ha->plx->control1));
1590 } else if (ha->type == GDT_PCIMPR) {
1591 dp6m_ptr = ha->brd;
1592 gdth_writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
1593 gdth_writeb(gdth_readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
1594 &dp6m_ptr->i960r.edoor_en_reg);
1596 spin_unlock_irqrestore(&ha->smp_lock, flags);
1600 static int gdth_get_status(unchar *pIStatus,int irq)
1602 register gdth_ha_str *ha;
1603 int i;
1605 TRACE(("gdth_get_status() irq %d ctr_count %d\n",
1606 irq,gdth_ctr_count));
1608 *pIStatus = 0;
1609 for (i=0; i<gdth_ctr_count; ++i) {
1610 ha = HADATA(gdth_ctr_tab[i]);
1611 if (ha->irq != (unchar)irq) /* check IRQ */
1612 continue;
1613 if (ha->type == GDT_EISA)
1614 *pIStatus = inb((ushort)ha->bmic + EDOORREG);
1615 else if (ha->type == GDT_ISA)
1616 *pIStatus =
1617 gdth_readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
1618 else if (ha->type == GDT_PCI)
1619 *pIStatus =
1620 gdth_readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
1621 else if (ha->type == GDT_PCINEW)
1622 *pIStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
1623 else if (ha->type == GDT_PCIMPR)
1624 *pIStatus =
1625 gdth_readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.edoor_reg);
1627 if (*pIStatus)
1628 return i; /* board found */
1630 return -1;
1634 static int gdth_test_busy(int hanum)
1636 register gdth_ha_str *ha;
1637 register int gdtsema0 = 0;
1639 TRACE(("gdth_test_busy() hanum %d\n",hanum));
1641 ha = HADATA(gdth_ctr_tab[hanum]);
1642 if (ha->type == GDT_EISA)
1643 gdtsema0 = (int)inb(ha->bmic + SEMA0REG);
1644 else if (ha->type == GDT_ISA)
1645 gdtsema0 = (int)gdth_readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
1646 else if (ha->type == GDT_PCI)
1647 gdtsema0 = (int)gdth_readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
1648 else if (ha->type == GDT_PCINEW)
1649 gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
1650 else if (ha->type == GDT_PCIMPR)
1651 gdtsema0 =
1652 (int)gdth_readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
1654 return (gdtsema0 & 1);
1658 static int gdth_get_cmd_index(int hanum)
1660 register gdth_ha_str *ha;
1661 int i;
1663 TRACE(("gdth_get_cmd_index() hanum %d\n",hanum));
1665 ha = HADATA(gdth_ctr_tab[hanum]);
1666 for (i=0; i<GDTH_MAXCMDS; ++i) {
1667 if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
1668 ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
1669 ha->cmd_tab[i].service = ha->pccb->Service;
1670 ha->pccb->CommandIndex = (ulong32)i+2;
1671 return (i+2);
1674 return 0;
1678 static void gdth_set_sema0(int hanum)
1680 register gdth_ha_str *ha;
1682 TRACE(("gdth_set_sema0() hanum %d\n",hanum));
1684 ha = HADATA(gdth_ctr_tab[hanum]);
1685 if (ha->type == GDT_EISA) {
1686 outb(1, ha->bmic + SEMA0REG);
1687 } else if (ha->type == GDT_ISA) {
1688 gdth_writeb(1, &((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
1689 } else if (ha->type == GDT_PCI) {
1690 gdth_writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
1691 } else if (ha->type == GDT_PCINEW) {
1692 outb(1, PTR2USHORT(&ha->plx->sema0_reg));
1693 } else if (ha->type == GDT_PCIMPR) {
1694 gdth_writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
/* Copy the staged command (ha->pccb) into the adapter's dual-ported memory
 * command queue.  Sets the queue entry's offset and service id, then copies
 * ha->cmd_len bytes (rounded up to a dword multiple) to the DPMEM command
 * area.  EISA boards have no DPMEM and return early after the counter bump. */
static void gdth_copy_command(int hanum)
{
    register gdth_ha_str *ha;
    register gdth_cmd_str *cmd_ptr;
    register gdt6m_dpram_str __iomem *dp6m_ptr;
    register gdt6c_dpram_str __iomem *dp6c_ptr;
    gdt6_dpram_str __iomem *dp6_ptr;
    gdt2_dpram_str __iomem *dp2_ptr;
    ushort cp_count,dp_offset,cmd_no;

    TRACE(("gdth_copy_command() hanum %d\n",hanum));

    ha = HADATA(gdth_ctr_tab[hanum]);
    cp_count = ha->cmd_len;
    dp_offset= ha->cmd_offs_dpmem;
    cmd_no   = ha->cmd_cnt;
    cmd_ptr  = ha->pccb;

    ++ha->cmd_cnt;
    if (ha->type == GDT_EISA)
        return;                             /* no DPMEM, no copy */

    /* set cpcount dword aligned */
    if (cp_count & 3)
        cp_count += (4 - (cp_count & 3));

    /* advance the DPMEM offset for the next queued command */
    ha->cmd_offs_dpmem += cp_count;

    /* set offset and service, copy command to DPMEM */
    if (ha->type == GDT_ISA) {
        dp2_ptr = ha->brd;
        gdth_writew(dp_offset + DPMEM_COMMAND_OFFSET,
                    &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
        gdth_writew((ushort)cmd_ptr->Service,
                    &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCI) {
        dp6_ptr = ha->brd;
        gdth_writew(dp_offset + DPMEM_COMMAND_OFFSET,
                    &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
        gdth_writew((ushort)cmd_ptr->Service,
                    &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCINEW) {
        dp6c_ptr = ha->brd;
        gdth_writew(dp_offset + DPMEM_COMMAND_OFFSET,
                    &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
        gdth_writew((ushort)cmd_ptr->Service,
                    &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    } else if (ha->type == GDT_PCIMPR) {
        dp6m_ptr = ha->brd;
        gdth_writew(dp_offset + DPMEM_COMMAND_OFFSET,
                    &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
        gdth_writew((ushort)cmd_ptr->Service,
                    &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
        memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
    }
}
/* Ring adapter hanum's doorbell to tell the firmware a new command is in
 * the queue.  GDT_INIT commands get bit 7 set in the service byte; on EISA
 * they additionally pass the DMA buffer address through the mailbox.
 * With GDTH_STATISTICS enabled, also tracks the high-water mark of
 * simultaneously used command slots. */
static void gdth_release_event(int hanum)
{
    register gdth_ha_str *ha;

    TRACE(("gdth_release_event() hanum %d\n",hanum));
    ha = HADATA(gdth_ctr_tab[hanum]);

#ifdef GDTH_STATISTICS
    {
        ulong32 i,j;
        /* count occupied command slots and update max_index */
        for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
            if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
                ++i;
        }
        if (max_index < i) {
            max_index = i;
            TRACE3(("GDT: max_index = %d\n",(ushort)i));
        }
    }
#endif

    if (ha->pccb->OpCode == GDT_INIT)
        ha->pccb->Service |= 0x80;

    if (ha->type == GDT_EISA) {
        if (ha->pccb->OpCode == GDT_INIT)   /* store DMA buffer */
            outl(ha->ccb_phys, ha->bmic + MAILBOXREG);
        outb(ha->pccb->Service, ha->bmic + LDOORREG);
    } else if (ha->type == GDT_ISA) {
        gdth_writeb(0, &((gdt2_dpram_str __iomem *)ha->brd)->io.event);
    } else if (ha->type == GDT_PCI) {
        gdth_writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
    } else if (ha->type == GDT_PCINEW) {
        outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
    } else if (ha->type == GDT_PCIMPR) {
        gdth_writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg);
    }
}
1800 static int gdth_wait(int hanum,int index,ulong32 time)
1802 gdth_ha_str *ha;
1803 int answer_found = FALSE;
1805 TRACE(("gdth_wait() hanum %d index %d time %d\n",hanum,index,time));
1807 ha = HADATA(gdth_ctr_tab[hanum]);
1808 if (index == 0)
1809 return 1; /* no wait required */
1811 gdth_from_wait = TRUE;
1812 do {
1813 gdth_interrupt((int)ha->irq,ha);
1814 if (wait_hanum==hanum && wait_index==index) {
1815 answer_found = TRUE;
1816 break;
1818 gdth_delay(1);
1819 } while (--time);
1820 gdth_from_wait = FALSE;
1822 while (gdth_test_busy(hanum))
1823 gdth_delay(0);
1825 return (answer_found);
/* Build and synchronously execute an internal (driver-initiated) firmware
 * command on adapter hanum.  `service` selects CACHE/RAW/SCREEN service,
 * `opcode` the operation; p1..p3 are packed into the service-specific
 * command union (layout depends on 64-bit DMA feature flags).  Retries
 * while the firmware answers S_BSY, up to INIT_RETRIES times.
 * Returns 1 when the final status is S_OK, 0 on any failure or timeout. */
static int gdth_internal_cmd(int hanum,unchar service,ushort opcode,ulong32 p1,
                             ulong64 p2,ulong64 p3)
{
    register gdth_ha_str *ha;
    register gdth_cmd_str *cmd_ptr;
    int retries,index;

    TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));

    ha = HADATA(gdth_ctr_tab[hanum]);
    cmd_ptr = ha->pccb;
    memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));

    /* make command */
    for (retries = INIT_RETRIES;;) {
        cmd_ptr->Service = service;
        cmd_ptr->RequestBuffer = INTERNAL_CMND;
        if (!(index=gdth_get_cmd_index(hanum))) {
            TRACE(("GDT: No free command index found\n"));
            return 0;
        }
        gdth_set_sema0(hanum);
        cmd_ptr->OpCode = opcode;
        cmd_ptr->BoardNode = LOCALBOARD;
        if (service == CACHESERVICE) {
            if (opcode == GDT_IOCTL) {
                /* IOCTL: p1 = subfunction, p2 = channel, p3 = param size;
                 * data is exchanged through the scratch buffer */
                cmd_ptr->u.ioctl.subfunc = p1;
                cmd_ptr->u.ioctl.channel = (ulong32)p2;
                cmd_ptr->u.ioctl.param_size = (ushort)p3;
                cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
            } else {
                /* cache r/w: p1 = device number, p2 = block number */
                if (ha->cache_feat & GDT_64BIT) {
                    cmd_ptr->u.cache64.DeviceNo = (ushort)p1;
                    cmd_ptr->u.cache64.BlockNo = p2;
                } else {
                    cmd_ptr->u.cache.DeviceNo = (ushort)p1;
                    cmd_ptr->u.cache.BlockNo = (ulong32)p2;
                }
            }
        } else if (service == SCSIRAWSERVICE) {
            /* raw SCSI: p1 = direction, p2 = bus, p3 = target | (lun << 8) */
            if (ha->raw_feat & GDT_64BIT) {
                cmd_ptr->u.raw64.direction = p1;
                cmd_ptr->u.raw64.bus = (unchar)p2;
                cmd_ptr->u.raw64.target = (unchar)p3;
                cmd_ptr->u.raw64.lun = (unchar)(p3 >> 8);
            } else {
                cmd_ptr->u.raw.direction = p1;
                cmd_ptr->u.raw.bus = (unchar)p2;
                cmd_ptr->u.raw.target = (unchar)p3;
                cmd_ptr->u.raw.lun = (unchar)(p3 >> 8);
            }
        } else if (service == SCREENSERVICE) {
            if (opcode == GDT_REALTIME) {
                /* realtime clock: p1..p3 are raw RTC dwords */
                *(ulong32 *)&cmd_ptr->u.screen.su.data[0] = p1;
                *(ulong32 *)&cmd_ptr->u.screen.su.data[4] = (ulong32)p2;
                *(ulong32 *)&cmd_ptr->u.screen.su.data[8] = (ulong32)p3;
            }
        }
        ha->cmd_len = sizeof(gdth_cmd_str);
        ha->cmd_offs_dpmem = 0;
        ha->cmd_cnt = 0;
        gdth_copy_command(hanum);
        gdth_release_event(hanum);
        gdth_delay(20);
        if (!gdth_wait(hanum,index,INIT_TIMEOUT)) {
            printk("GDT: Initialization error (timeout service %d)\n",service);
            return 0;
        }
        /* retry only while the firmware reports "busy" */
        if (ha->status != S_BSY || --retries == 0)
            break;
        gdth_delay(1);
    }

    return (ha->status != S_OK ? 0:1);
}
1906 /* search for devices */
1908 static int __init gdth_search_drives(int hanum)
1910 register gdth_ha_str *ha;
1911 ushort cdev_cnt, i;
1912 int ok;
1913 ulong32 bus_no, drv_cnt, drv_no, j;
1914 gdth_getch_str *chn;
1915 gdth_drlist_str *drl;
1916 gdth_iochan_str *ioc;
1917 gdth_raw_iochan_str *iocr;
1918 gdth_arcdl_str *alst;
1919 gdth_alist_str *alst2;
1920 gdth_oem_str_ioctl *oemstr;
1921 #ifdef INT_COAL
1922 gdth_perf_modes *pmod;
1923 #endif
1925 #ifdef GDTH_RTC
1926 unchar rtc[12];
1927 ulong flags;
1928 #endif
1930 TRACE(("gdth_search_drives() hanum %d\n",hanum));
1931 ha = HADATA(gdth_ctr_tab[hanum]);
1932 ok = 0;
1934 /* initialize controller services, at first: screen service */
1935 ha->screen_feat = 0;
1936 if (!force_dma32) {
1937 ok = gdth_internal_cmd(hanum,SCREENSERVICE,GDT_X_INIT_SCR,0,0,0);
1938 if (ok)
1939 ha->screen_feat = GDT_64BIT;
1941 if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
1942 ok = gdth_internal_cmd(hanum,SCREENSERVICE,GDT_INIT,0,0,0);
1943 if (!ok) {
1944 printk("GDT-HA %d: Initialization error screen service (code %d)\n",
1945 hanum, ha->status);
1946 return 0;
1948 TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));
1950 #ifdef GDTH_RTC
1951 /* read realtime clock info, send to controller */
1952 /* 1. wait for the falling edge of update flag */
1953 spin_lock_irqsave(&rtc_lock, flags);
1954 for (j = 0; j < 1000000; ++j)
1955 if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
1956 break;
1957 for (j = 0; j < 1000000; ++j)
1958 if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
1959 break;
1960 /* 2. read info */
1961 do {
1962 for (j = 0; j < 12; ++j)
1963 rtc[j] = CMOS_READ(j);
1964 } while (rtc[0] != CMOS_READ(0));
1965 spin_lock_irqrestore(&rtc_lock, flags);
1966 TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0],
1967 *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]));
1968 /* 3. send to controller firmware */
1969 gdth_internal_cmd(hanum,SCREENSERVICE,GDT_REALTIME, *(ulong32 *)&rtc[0],
1970 *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]);
1971 #endif
1973 /* unfreeze all IOs */
1974 gdth_internal_cmd(hanum,CACHESERVICE,GDT_UNFREEZE_IO,0,0,0);
1976 /* initialize cache service */
1977 ha->cache_feat = 0;
1978 if (!force_dma32) {
1979 ok = gdth_internal_cmd(hanum,CACHESERVICE,GDT_X_INIT_HOST,LINUX_OS,0,0);
1980 if (ok)
1981 ha->cache_feat = GDT_64BIT;
1983 if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
1984 ok = gdth_internal_cmd(hanum,CACHESERVICE,GDT_INIT,LINUX_OS,0,0);
1985 if (!ok) {
1986 printk("GDT-HA %d: Initialization error cache service (code %d)\n",
1987 hanum, ha->status);
1988 return 0;
1990 TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
1991 cdev_cnt = (ushort)ha->info;
1992 ha->fw_vers = ha->service;
1994 #ifdef INT_COAL
1995 if (ha->type == GDT_PCIMPR) {
1996 /* set perf. modes */
1997 pmod = (gdth_perf_modes *)ha->pscratch;
1998 pmod->version = 1;
1999 pmod->st_mode = 1; /* enable one status buffer */
2000 *((ulong64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
2001 pmod->st_buff_indx1 = COALINDEX;
2002 pmod->st_buff_addr2 = 0;
2003 pmod->st_buff_u_addr2 = 0;
2004 pmod->st_buff_indx2 = 0;
2005 pmod->st_buff_size = sizeof(gdth_coal_status) * MAXOFFSETS;
2006 pmod->cmd_mode = 0; // disable all cmd buffers
2007 pmod->cmd_buff_addr1 = 0;
2008 pmod->cmd_buff_u_addr1 = 0;
2009 pmod->cmd_buff_indx1 = 0;
2010 pmod->cmd_buff_addr2 = 0;
2011 pmod->cmd_buff_u_addr2 = 0;
2012 pmod->cmd_buff_indx2 = 0;
2013 pmod->cmd_buff_size = 0;
2014 pmod->reserved1 = 0;
2015 pmod->reserved2 = 0;
2016 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,SET_PERF_MODES,
2017 INVALID_CHANNEL,sizeof(gdth_perf_modes))) {
2018 printk("GDT-HA %d: Interrupt coalescing activated\n", hanum);
2021 #endif
2023 /* detect number of buses - try new IOCTL */
2024 iocr = (gdth_raw_iochan_str *)ha->pscratch;
2025 iocr->hdr.version = 0xffffffff;
2026 iocr->hdr.list_entries = MAXBUS;
2027 iocr->hdr.first_chan = 0;
2028 iocr->hdr.last_chan = MAXBUS-1;
2029 iocr->hdr.list_offset = GDTOFFSOF(gdth_raw_iochan_str, list[0]);
2030 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,IOCHAN_RAW_DESC,
2031 INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) {
2032 TRACE2(("IOCHAN_RAW_DESC supported!\n"));
2033 ha->bus_cnt = iocr->hdr.chan_count;
2034 for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
2035 if (iocr->list[bus_no].proc_id < MAXID)
2036 ha->bus_id[bus_no] = iocr->list[bus_no].proc_id;
2037 else
2038 ha->bus_id[bus_no] = 0xff;
2040 } else {
2041 /* old method */
2042 chn = (gdth_getch_str *)ha->pscratch;
2043 for (bus_no = 0; bus_no < MAXBUS; ++bus_no) {
2044 chn->channel_no = bus_no;
2045 if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,
2046 SCSI_CHAN_CNT | L_CTRL_PATTERN,
2047 IO_CHANNEL | INVALID_CHANNEL,
2048 sizeof(gdth_getch_str))) {
2049 if (bus_no == 0) {
2050 printk("GDT-HA %d: Error detecting channel count (0x%x)\n",
2051 hanum, ha->status);
2052 return 0;
2054 break;
2056 if (chn->siop_id < MAXID)
2057 ha->bus_id[bus_no] = chn->siop_id;
2058 else
2059 ha->bus_id[bus_no] = 0xff;
2061 ha->bus_cnt = (unchar)bus_no;
2063 TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));
2065 /* read cache configuration */
2066 if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,CACHE_INFO,
2067 INVALID_CHANNEL,sizeof(gdth_cinfo_str))) {
2068 printk("GDT-HA %d: Initialization error cache service (code %d)\n",
2069 hanum, ha->status);
2070 return 0;
2072 ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar;
2073 TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n",
2074 ha->cpar.version,ha->cpar.state,ha->cpar.strategy,
2075 ha->cpar.write_back,ha->cpar.block_size));
2077 /* read board info and features */
2078 ha->more_proc = FALSE;
2079 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,BOARD_INFO,
2080 INVALID_CHANNEL,sizeof(gdth_binfo_str))) {
2081 memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch,
2082 sizeof(gdth_binfo_str));
2083 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,BOARD_FEATURES,
2084 INVALID_CHANNEL,sizeof(gdth_bfeat_str))) {
2085 TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n"));
2086 ha->bfeat = *(gdth_bfeat_str *)ha->pscratch;
2087 ha->more_proc = TRUE;
2089 } else {
2090 TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n"));
2091 strcpy(ha->binfo.type_string, gdth_ctr_name(hanum));
2093 TRACE2(("Controller name: %s\n",ha->binfo.type_string));
2095 /* read more informations */
2096 if (ha->more_proc) {
2097 /* physical drives, channel addresses */
2098 ioc = (gdth_iochan_str *)ha->pscratch;
2099 ioc->hdr.version = 0xffffffff;
2100 ioc->hdr.list_entries = MAXBUS;
2101 ioc->hdr.first_chan = 0;
2102 ioc->hdr.last_chan = MAXBUS-1;
2103 ioc->hdr.list_offset = GDTOFFSOF(gdth_iochan_str, list[0]);
2104 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,IOCHAN_DESC,
2105 INVALID_CHANNEL,sizeof(gdth_iochan_str))) {
2106 for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
2107 ha->raw[bus_no].address = ioc->list[bus_no].address;
2108 ha->raw[bus_no].local_no = ioc->list[bus_no].local_no;
2110 } else {
2111 for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
2112 ha->raw[bus_no].address = IO_CHANNEL;
2113 ha->raw[bus_no].local_no = bus_no;
2116 for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
2117 chn = (gdth_getch_str *)ha->pscratch;
2118 chn->channel_no = ha->raw[bus_no].local_no;
2119 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,
2120 SCSI_CHAN_CNT | L_CTRL_PATTERN,
2121 ha->raw[bus_no].address | INVALID_CHANNEL,
2122 sizeof(gdth_getch_str))) {
2123 ha->raw[bus_no].pdev_cnt = chn->drive_cnt;
2124 TRACE2(("Channel %d: %d phys. drives\n",
2125 bus_no,chn->drive_cnt));
2127 if (ha->raw[bus_no].pdev_cnt > 0) {
2128 drl = (gdth_drlist_str *)ha->pscratch;
2129 drl->sc_no = ha->raw[bus_no].local_no;
2130 drl->sc_cnt = ha->raw[bus_no].pdev_cnt;
2131 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,
2132 SCSI_DR_LIST | L_CTRL_PATTERN,
2133 ha->raw[bus_no].address | INVALID_CHANNEL,
2134 sizeof(gdth_drlist_str))) {
2135 for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j)
2136 ha->raw[bus_no].id_list[j] = drl->sc_list[j];
2137 } else {
2138 ha->raw[bus_no].pdev_cnt = 0;
2143 /* logical drives */
2144 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,CACHE_DRV_CNT,
2145 INVALID_CHANNEL,sizeof(ulong32))) {
2146 drv_cnt = *(ulong32 *)ha->pscratch;
2147 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,CACHE_DRV_LIST,
2148 INVALID_CHANNEL,drv_cnt * sizeof(ulong32))) {
2149 for (j = 0; j < drv_cnt; ++j) {
2150 drv_no = ((ulong32 *)ha->pscratch)[j];
2151 if (drv_no < MAX_LDRIVES) {
2152 ha->hdr[drv_no].is_logdrv = TRUE;
2153 TRACE2(("Drive %d is log. drive\n",drv_no));
2157 alst = (gdth_arcdl_str *)ha->pscratch;
2158 alst->entries_avail = MAX_LDRIVES;
2159 alst->first_entry = 0;
2160 alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]);
2161 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,
2162 ARRAY_DRV_LIST2 | LA_CTRL_PATTERN,
2163 INVALID_CHANNEL, sizeof(gdth_arcdl_str) +
2164 (alst->entries_avail-1) * sizeof(gdth_alist_str))) {
2165 for (j = 0; j < alst->entries_init; ++j) {
2166 ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd;
2167 ha->hdr[j].is_master = alst->list[j].is_master;
2168 ha->hdr[j].is_parity = alst->list[j].is_parity;
2169 ha->hdr[j].is_hotfix = alst->list[j].is_hotfix;
2170 ha->hdr[j].master_no = alst->list[j].cd_handle;
2172 } else if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,
2173 ARRAY_DRV_LIST | LA_CTRL_PATTERN,
2174 0, 35 * sizeof(gdth_alist_str))) {
2175 for (j = 0; j < 35; ++j) {
2176 alst2 = &((gdth_alist_str *)ha->pscratch)[j];
2177 ha->hdr[j].is_arraydrv = alst2->is_arrayd;
2178 ha->hdr[j].is_master = alst2->is_master;
2179 ha->hdr[j].is_parity = alst2->is_parity;
2180 ha->hdr[j].is_hotfix = alst2->is_hotfix;
2181 ha->hdr[j].master_no = alst2->cd_handle;
2187 /* initialize raw service */
2188 ha->raw_feat = 0;
2189 if (!force_dma32) {
2190 ok = gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_X_INIT_RAW,0,0,0);
2191 if (ok)
2192 ha->raw_feat = GDT_64BIT;
2194 if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
2195 ok = gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_INIT,0,0,0);
2196 if (!ok) {
2197 printk("GDT-HA %d: Initialization error raw service (code %d)\n",
2198 hanum, ha->status);
2199 return 0;
2201 TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n"));
2203 /* set/get features raw service (scatter/gather) */
2204 if (gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_SET_FEAT,SCATTER_GATHER,
2205 0,0)) {
2206 TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n"));
2207 if (gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_GET_FEAT,0,0,0)) {
2208 TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
2209 ha->info));
2210 ha->raw_feat |= (ushort)ha->info;
2214 /* set/get features cache service (equal to raw service) */
2215 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_SET_FEAT,0,
2216 SCATTER_GATHER,0)) {
2217 TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n"));
2218 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_GET_FEAT,0,0,0)) {
2219 TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
2220 ha->info));
2221 ha->cache_feat |= (ushort)ha->info;
2225 /* reserve drives for raw service */
2226 if (reserve_mode != 0) {
2227 gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_RESERVE_ALL,
2228 reserve_mode == 1 ? 1 : 3, 0, 0);
2229 TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n",
2230 ha->status));
2232 for (i = 0; i < MAX_RES_ARGS; i += 4) {
2233 if (reserve_list[i] == hanum && reserve_list[i+1] < ha->bus_cnt &&
2234 reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) {
2235 TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n",
2236 reserve_list[i], reserve_list[i+1],
2237 reserve_list[i+2], reserve_list[i+3]));
2238 if (!gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_RESERVE,0,
2239 reserve_list[i+1], reserve_list[i+2] |
2240 (reserve_list[i+3] << 8))) {
2241 printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n",
2242 hanum, ha->status);
2247 /* Determine OEM string using IOCTL */
2248 oemstr = (gdth_oem_str_ioctl *)ha->pscratch;
2249 oemstr->params.ctl_version = 0x01;
2250 oemstr->params.buffer_size = sizeof(oemstr->text);
2251 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,
2252 CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL,
2253 sizeof(gdth_oem_str_ioctl))) {
2254 TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n"));
2255 printk("GDT-HA %d: Vendor: %s Name: %s\n",
2256 hanum,oemstr->text.oem_company_name,ha->binfo.type_string);
2257 /* Save the Host Drive inquiry data */
2258 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
2259 strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id,
2260 sizeof(ha->oem_name));
2261 #else
2262 strncpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id,7);
2263 ha->oem_name[7] = '\0';
2264 #endif
2265 } else {
2266 /* Old method, based on PCI ID */
2267 TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n"));
2268 printk("GDT-HA %d: Name: %s\n",
2269 hanum,ha->binfo.type_string);
2270 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
2271 if (ha->oem_id == OEM_ID_INTEL)
2272 strlcpy(ha->oem_name,"Intel ", sizeof(ha->oem_name));
2273 else
2274 strlcpy(ha->oem_name,"ICP ", sizeof(ha->oem_name));
2275 #else
2276 if (ha->oem_id == OEM_ID_INTEL)
2277 strcpy(ha->oem_name,"Intel ");
2278 else
2279 strcpy(ha->oem_name,"ICP ");
2280 #endif
2283 /* scanning for host drives */
2284 for (i = 0; i < cdev_cnt; ++i)
2285 gdth_analyse_hdrive(hanum,i);
2287 TRACE(("gdth_search_drives() OK\n"));
2288 return 1;
/* Query the controller's cache service for host drive 'hdrive' and fill in
 * its ha->hdr[] slot: presence, size, CHS geometry, device type, cluster
 * info and r/w attributes.
 * Returns 1 on success, 0 if hdrive is out of range or GDT_INFO fails. */
2291 static int gdth_analyse_hdrive(int hanum,ushort hdrive)
2293 register gdth_ha_str *ha;
2294 ulong32 drv_cyls;
2295 int drv_hds, drv_secs;
2297 TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n",hanum,hdrive));
2298 if (hdrive >= MAX_HDRIVES)
2299 return 0;
2300 ha = HADATA(gdth_ctr_tab[hanum]);
/* GDT_INFO leaves the drive size in ha->info and the packed geometry
 * (if any) in ha->info2 */
2302 if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_INFO,hdrive,0,0))
2303 return 0;
2304 ha->hdr[hdrive].present = TRUE;
2305 ha->hdr[hdrive].size = ha->info;
2307 /* evaluate mapping (sectors per head, heads per cylinder) */
2308 ha->hdr[hdrive].size &= ~SECS32;
2309 if (ha->info2 == 0) {
/* no firmware-supplied mapping: derive a geometry from the size */
2310 gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs);
2311 } else {
/* info2 packs heads in the low byte, sectors/head in the next byte */
2312 drv_hds = ha->info2 & 0xff;
2313 drv_secs = (ha->info2 >> 8) & 0xff;
2314 drv_cyls = (ulong32)ha->hdr[hdrive].size / drv_hds / drv_secs;
2316 ha->hdr[hdrive].heads = (unchar)drv_hds;
2317 ha->hdr[hdrive].secs = (unchar)drv_secs;
2318 /* round size */
2319 ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;
/* 64-bit capable firmware: re-read the full 64-bit size via GDT_X_INFO
 * (high half in info2, low half in info) */
2321 if (ha->cache_feat & GDT_64BIT) {
2322 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_X_INFO,hdrive,0,0)
2323 && ha->info2 != 0) {
2324 ha->hdr[hdrive].size = ((ulong64)ha->info2 << 32) | ha->info;
2327 TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
2328 hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs));
2330 /* get informations about device */
2331 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_DEVTYPE,hdrive,0,0)) {
2332 TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
2333 hdrive,ha->info));
2334 ha->hdr[hdrive].devtype = (ushort)ha->info;
2337 /* cluster info */
2338 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_CLUST_INFO,hdrive,0,0)) {
2339 TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
2340 hdrive,ha->info));
/* with shared_access the cluster type is deliberately left unset */
2341 if (!shared_access)
2342 ha->hdr[hdrive].cluster_type = (unchar)ha->info;
2345 /* R/W attributes */
2346 if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_RW_ATTRIBS,hdrive,0,0)) {
2347 TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
2348 hdrive,ha->info));
2349 ha->hdr[hdrive].rw_attribs = (unchar)ha->info;
2352 return 1;
2356 /* command queueing/sending functions */
/* Insert a SCSI command into the per-adapter request list, kept sorted by
 * priority (0 = highest .. 0xff = lowest); protected by ha->smp_lock.
 * Commands with done == gdth_scsi_done (driver-internal) skip the
 * priority/lock bookkeeping. */
2358 static void gdth_putq(int hanum,Scsi_Cmnd *scp,unchar priority)
2360 register gdth_ha_str *ha;
2361 register Scsi_Cmnd *pscp;
2362 register Scsi_Cmnd *nscp;
2363 ulong flags;
2364 unchar b, t;
2366 TRACE(("gdth_putq() priority %d\n",priority));
2367 ha = HADATA(gdth_ctr_tab[hanum]);
2368 spin_lock_irqsave(&ha->smp_lock, flags);
/* mid-layer command: stash the priority in SCp.this_residual and, if the
 * target bus/host-drive is locked, stretch the timeout rather than fail */
2370 if (scp->done != gdth_scsi_done) {
2371 scp->SCp.this_residual = (int)priority;
2372 b = virt_ctr ? NUMDATA(scp->device->host)->busnum:scp->device->channel;
2373 t = scp->device->id;
2374 if (priority >= DEFAULT_PRI) {
2375 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
2376 (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
2377 TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
2378 scp->SCp.buffers_residual = gdth_update_timeout(hanum, scp, 0);
/* link into the singly-linked list (SCp.ptr is the 'next' pointer) */
2383 if (ha->req_first==NULL) {
2384 ha->req_first = scp; /* queue was empty */
2385 scp->SCp.ptr = NULL;
2386 } else { /* queue not empty */
2387 pscp = ha->req_first;
2388 nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
2389 /* priority: 0-highest,..,0xff-lowest */
2390 while (nscp && (unchar)nscp->SCp.this_residual <= priority) {
2391 pscp = nscp;
2392 nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
2394 pscp->SCp.ptr = (char *)scp;
2395 scp->SCp.ptr = (char *)nscp;
2397 spin_unlock_irqrestore(&ha->smp_lock, flags);
2399 #ifdef GDTH_STATISTICS
/* statistics: 'flags' is reused as a plain counter to record the
 * maximum observed queue length in max_rq */
2400 flags = 0;
2401 for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
2402 ++flags;
2403 if (max_rq < flags) {
2404 max_rq = flags;
2405 TRACE3(("GDT: max_rq = %d\n",(ushort)max_rq));
2407 #endif
/* Dispatch loop: walk the per-adapter request list and send as many queued
 * commands to the firmware as possible.  Routes each command to the cache
 * service (host drives), raw SCSI service (physical channels) or the
 * internal emulation (gdth_internal_cache_cmd); also drives the
 * TEST_UNIT_READY based bus-scan state machine (ha->scan_mode).
 * In polling mode the lock is not taken and only one command is issued,
 * which is then waited for at the bottom. */
2410 static void gdth_next(int hanum)
2412 register gdth_ha_str *ha;
2413 register Scsi_Cmnd *pscp;
2414 register Scsi_Cmnd *nscp;
2415 unchar b, t, l, firsttime;
2416 unchar this_cmd, next_cmd;
2417 ulong flags = 0;
2418 int cmd_index;
2420 TRACE(("gdth_next() hanum %d\n",hanum));
2421 ha = HADATA(gdth_ctr_tab[hanum]);
2422 if (!gdth_polling)
2423 spin_lock_irqsave(&ha->smp_lock, flags);
2425 ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
2426 this_cmd = firsttime = TRUE;
/* polling: issue exactly one command per call */
2427 next_cmd = gdth_polling ? FALSE:TRUE;
2428 cmd_index = 0;
2430 for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) {
/* keep pscp one node behind nscp for unlinking below */
2431 if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr)
2432 pscp = (Scsi_Cmnd *)pscp->SCp.ptr;
2433 if (nscp->done != gdth_scsi_done) {
2434 b = virt_ctr ?
2435 NUMDATA(nscp->device->host)->busnum : nscp->device->channel;
2436 t = nscp->device->id;
2437 l = nscp->device->lun;
/* skip (leave queued) normal-priority commands for locked targets */
2438 if (nscp->SCp.this_residual >= DEFAULT_PRI) {
2439 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
2440 (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock))
2441 continue;
2443 } else
2444 b = t = l = 0;
2446 if (firsttime) {
2447 if (gdth_test_busy(hanum)) { /* controller busy ? */
2448 TRACE(("gdth_next() controller %d busy !\n",hanum));
2449 if (!gdth_polling) {
2450 spin_unlock_irqrestore(&ha->smp_lock, flags);
2451 return;
/* in polling mode spin until the controller is free */
2453 while (gdth_test_busy(hanum))
2454 gdth_delay(1);
2456 firsttime = FALSE;
2459 if (nscp->done != gdth_scsi_done) {
/* phase == -1: command not yet classified; pick a service */
2460 if (nscp->SCp.phase == -1) {
2461 nscp->SCp.phase = CACHESERVICE; /* default: cache svc. */
2462 if (nscp->cmnd[0] == TEST_UNIT_READY) {
2463 TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n",
2464 b, t, l));
2465 /* TEST_UNIT_READY -> set scan mode */
2466 if ((ha->scan_mode & 0x0f) == 0) {
2467 if (b == 0 && t == 0 && l == 0) {
2468 ha->scan_mode |= 1;
2469 TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
2471 } else if ((ha->scan_mode & 0x0f) == 1) {
2472 if (b == 0 && ((t == 0 && l == 1) ||
2473 (t == 1 && l == 0))) {
2474 nscp->SCp.sent_command = GDT_SCAN_START;
2475 nscp->SCp.phase = ((ha->scan_mode & 0x10 ? 1:0) << 8)
2476 | SCSIRAWSERVICE;
2477 ha->scan_mode = 0x12;
2478 TRACE2(("Scan mode: 0x%x (SCAN_START)\n",
2479 ha->scan_mode));
2480 } else {
2481 ha->scan_mode &= 0x10;
2482 TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
2484 } else if (ha->scan_mode == 0x12) {
/* last target of the last bus -> terminate the scan */
2485 if (b == ha->bus_cnt && t == ha->tid_cnt-1) {
2486 nscp->SCp.phase = SCSIRAWSERVICE;
2487 nscp->SCp.sent_command = GDT_SCAN_END;
2488 ha->scan_mode &= 0x10;
2489 TRACE2(("Scan mode: 0x%x (SCAN_END)\n",
2490 ha->scan_mode));
/* cluster drives: re-read cluster state before ordinary commands */
2494 if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY &&
2495 nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE &&
2496 (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) {
2497 /* always GDT_CLUST_INFO! */
2498 nscp->SCp.sent_command = GDT_CLUST_INFO;
/* sent_command set: special internal command for the chosen service */
2503 if (nscp->SCp.sent_command != -1) {
2504 if ((nscp->SCp.phase & 0xff) == CACHESERVICE) {
2505 if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,t)))
2506 this_cmd = FALSE;
2507 next_cmd = FALSE;
2508 } else if ((nscp->SCp.phase & 0xff) == SCSIRAWSERVICE) {
2509 if (!(cmd_index=gdth_fill_raw_cmd(hanum,nscp,BUS_L2P(ha,b))))
2510 this_cmd = FALSE;
2511 next_cmd = FALSE;
2512 } else {
/* unknown phase: complete with CHECK_CONDITION / NOT_READY */
2513 memset((char*)nscp->sense_buffer,0,16);
2514 nscp->sense_buffer[0] = 0x70;
2515 nscp->sense_buffer[2] = NOT_READY;
2516 nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
2517 if (!nscp->SCp.have_data_in)
2518 nscp->SCp.have_data_in++;
2519 else
2520 nscp->scsi_done(nscp);
2522 } else if (nscp->done == gdth_scsi_done) {
/* driver-internal command (ioctl path) */
2523 if (!(cmd_index=gdth_special_cmd(hanum,nscp)))
2524 this_cmd = FALSE;
2525 next_cmd = FALSE;
2526 } else if (b != ha->virt_bus) {
/* physical bus: raw service, limited to GDTH_MAX_RAW per target */
2527 if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW ||
2528 !(cmd_index=gdth_fill_raw_cmd(hanum,nscp,BUS_L2P(ha,b))))
2529 this_cmd = FALSE;
2530 else
2531 ha->raw[BUS_L2P(ha,b)].io_cnt[t]++;
2532 } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) {
2533 TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n",
2534 nscp->cmnd[0], b, t, l));
2535 nscp->result = DID_BAD_TARGET << 16;
2536 if (!nscp->SCp.have_data_in)
2537 nscp->SCp.have_data_in++;
2538 else
2539 nscp->scsi_done(nscp);
2540 } else {
/* ordinary command for a present host drive on the virtual bus */
2541 switch (nscp->cmnd[0]) {
2542 case TEST_UNIT_READY:
2543 case INQUIRY:
2544 case REQUEST_SENSE:
2545 case READ_CAPACITY:
2546 case VERIFY:
2547 case START_STOP:
2548 case MODE_SENSE:
2549 case SERVICE_ACTION_IN:
2550 TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
2551 nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
2552 nscp->cmnd[4],nscp->cmnd[5]));
2553 if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) {
2554 /* return UNIT_ATTENTION */
2555 TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
2556 nscp->cmnd[0], t));
2557 ha->hdr[t].media_changed = FALSE;
2558 memset((char*)nscp->sense_buffer,0,16);
2559 nscp->sense_buffer[0] = 0x70;
2560 nscp->sense_buffer[2] = UNIT_ATTENTION;
2561 nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
2562 if (!nscp->SCp.have_data_in)
2563 nscp->SCp.have_data_in++;
2564 else
2565 nscp->scsi_done(nscp);
2566 } else if (gdth_internal_cache_cmd(hanum,nscp))
2567 nscp->scsi_done(nscp);
2568 break;
2570 case ALLOW_MEDIUM_REMOVAL:
2571 TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
2572 nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
2573 nscp->cmnd[4],nscp->cmnd[5]));
2574 if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) {
2575 TRACE(("Prevent r. nonremov. drive->do nothing\n"));
2576 nscp->result = DID_OK << 16;
2577 nscp->sense_buffer[0] = 0;
2578 if (!nscp->SCp.have_data_in)
2579 nscp->SCp.have_data_in++;
2580 else
2581 nscp->scsi_done(nscp);
2582 } else {
/* cmnd[3] carries the removable flag into gdth_fill_cache_cmd */
2583 nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0;
2584 TRACE(("Prevent/allow r. %d rem. drive %d\n",
2585 nscp->cmnd[4],nscp->cmnd[3]));
2586 if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,t)))
2587 this_cmd = FALSE;
2589 break;
2591 case RESERVE:
2592 case RELEASE:
2593 TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ?
2594 "RESERVE" : "RELEASE"));
2595 if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,t)))
2596 this_cmd = FALSE;
2597 break;
2599 case READ_6:
2600 case WRITE_6:
2601 case READ_10:
2602 case WRITE_10:
2603 case READ_16:
2604 case WRITE_16:
2605 if (ha->hdr[t].media_changed) {
2606 /* return UNIT_ATTENTION */
2607 TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
2608 nscp->cmnd[0], t));
2609 ha->hdr[t].media_changed = FALSE;
2610 memset((char*)nscp->sense_buffer,0,16);
2611 nscp->sense_buffer[0] = 0x70;
2612 nscp->sense_buffer[2] = UNIT_ATTENTION;
2613 nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
2614 if (!nscp->SCp.have_data_in)
2615 nscp->SCp.have_data_in++;
2616 else
2617 nscp->scsi_done(nscp);
2618 } else if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,t)))
2619 this_cmd = FALSE;
2620 break;
2622 default:
2623 TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0],
2624 nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
2625 nscp->cmnd[4],nscp->cmnd[5]));
2626 printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n",
2627 hanum, nscp->cmnd[0]);
2628 nscp->result = DID_ABORT << 16;
2629 if (!nscp->SCp.have_data_in)
2630 nscp->SCp.have_data_in++;
2631 else
2632 nscp->scsi_done(nscp);
2633 break;
/* this_cmd == FALSE: no room for this command now; stop scanning */
2637 if (!this_cmd)
2638 break;
/* unlink the successfully-issued command from the request list */
2639 if (nscp == ha->req_first)
2640 ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr;
2641 else
2642 pscp->SCp.ptr = nscp->SCp.ptr;
2643 if (!next_cmd)
2644 break;
/* ring the doorbell for everything batched in this pass */
2647 if (ha->cmd_cnt > 0) {
2648 gdth_release_event(hanum);
2651 if (!gdth_polling)
2652 spin_unlock_irqrestore(&ha->smp_lock, flags);
2654 if (gdth_polling && ha->cmd_cnt > 0) {
2655 if (!gdth_wait(hanum,cmd_index,POLL_TIMEOUT))
2656 printk("GDT-HA %d: Command %d timed out !\n",
2657 hanum,cmd_index);
/* Copy up to 'count' bytes of internally-generated data (inquiry, sense,
 * mode/capacity pages) into a command's data buffer, honouring both the
 * scatter-gather and the flat request_buffer case.  The copy is clamped
 * to scp->request_bufflen.  Highmem pages are mapped with kmap_atomic,
 * so the per-element copy runs with local interrupts disabled. */
2661 static void gdth_copy_internal_data(int hanum,Scsi_Cmnd *scp,
2662 char *buffer,ushort count)
2664 ushort cpcount,i;
2665 ushort cpsum,cpnow;
2666 struct scatterlist *sl;
2667 gdth_ha_str *ha;
2668 char *address;
2670 cpcount = count<=(ushort)scp->request_bufflen ? count:(ushort)scp->request_bufflen;
2671 ha = HADATA(gdth_ctr_tab[hanum]);
2673 if (scp->use_sg) {
2674 sl = (struct scatterlist *)scp->request_buffer;
2675 for (i=0,cpsum=0; i<scp->use_sg; ++i,++sl) {
2676 unsigned long flags;
/* cpnow = bytes to copy into this sg element; never past cpcount */
2677 cpnow = (ushort)sl->length;
2678 TRACE(("copy_internal() now %d sum %d count %d %d\n",
2679 cpnow,cpsum,cpcount,(ushort)scp->bufflen));
2680 if (cpsum+cpnow > cpcount)
2681 cpnow = cpcount - cpsum;
2682 cpsum += cpnow;
2683 if (!sl->page) {
2684 printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
2685 hanum);
2686 return;
2688 local_irq_save(flags);
2689 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
2690 address = kmap_atomic(sl->page, KM_BIO_SRC_IRQ) + sl->offset;
2691 memcpy(address,buffer,cpnow);
/* keep caches coherent before the page is read elsewhere */
2692 flush_dcache_page(sl->page);
2693 kunmap_atomic(address, KM_BIO_SRC_IRQ);
2694 #else
2695 address = kmap_atomic(sl->page, KM_BH_IRQ) + sl->offset;
2696 memcpy(address,buffer,cpnow);
2697 flush_dcache_page(sl->page);
2698 kunmap_atomic(address, KM_BH_IRQ);
2699 #endif
2700 local_irq_restore(flags);
2701 if (cpsum == cpcount)
2702 break;
2703 buffer += cpnow;
2705 } else {
/* flat buffer: a single memcpy suffices */
2706 TRACE(("copy_internal() count %d\n",cpcount));
2707 memcpy((char*)scp->request_buffer,buffer,cpcount);
/* Emulate simple SCSI commands (INQUIRY, REQUEST_SENSE, MODE_SENSE,
 * READ_CAPACITY, READ_CAPACITY(16), TEST_UNIT_READY, VERIFY, START_STOP)
 * for host drives entirely inside the driver, without firmware I/O.
 * Returns 1 when the command is complete and the caller should invoke
 * scsi_done, 0 when completion is deferred via SCp.have_data_in. */
2711 static int gdth_internal_cache_cmd(int hanum,Scsi_Cmnd *scp)
2713 register gdth_ha_str *ha;
2714 unchar t;
2715 gdth_inq_data inq;
2716 gdth_rdcap_data rdc;
2717 gdth_sense_data sd;
2718 gdth_modep_data mpd;
2720 ha = HADATA(gdth_ctr_tab[hanum]);
2721 t = scp->device->id;
2722 TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
2723 scp->cmnd[0],t));
/* default: success, no sense data */
2725 scp->result = DID_OK << 16;
2726 scp->sense_buffer[0] = 0;
2728 switch (scp->cmnd[0]) {
2729 case TEST_UNIT_READY:
2730 case VERIFY:
2731 case START_STOP:
2732 TRACE2(("Test/Verify/Start hdrive %d\n",t));
2733 break;
2735 case INQUIRY:
2736 TRACE2(("Inquiry hdrive %d devtype %d\n",
2737 t,ha->hdr[t].devtype));
2738 inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
2739 /* you can here set all disks to removable, if you want to do
2740 a flush using the ALLOW_MEDIUM_REMOVAL command */
2741 inq.modif_rmb = 0x00;
/* removable devices and cluster drives report the RMB bit */
2742 if ((ha->hdr[t].devtype & 1) ||
2743 (ha->hdr[t].cluster_type & CLUSTER_DRIVE))
2744 inq.modif_rmb = 0x80;
2745 inq.version = 2;
2746 inq.resp_aenc = 2;
2747 inq.add_length= 32;
2748 strcpy(inq.vendor,ha->oem_name);
2749 sprintf(inq.product,"Host Drive #%02d",t);
2750 strcpy(inq.revision," ");
2751 gdth_copy_internal_data(hanum,scp,(char*)&inq,sizeof(gdth_inq_data));
2752 break;
2754 case REQUEST_SENSE:
2755 TRACE2(("Request sense hdrive %d\n",t));
2756 sd.errorcode = 0x70;
2757 sd.segno = 0x00;
2758 sd.key = NO_SENSE;
2759 sd.info = 0;
2760 sd.add_length= 0;
2761 gdth_copy_internal_data(hanum,scp,(char*)&sd,sizeof(gdth_sense_data));
2762 break;
2764 case MODE_SENSE:
2765 TRACE2(("Mode sense hdrive %d\n",t));
2766 memset((char*)&mpd,0,sizeof(gdth_modep_data));
2767 mpd.hd.data_length = sizeof(gdth_modep_data);
/* devtype bit 1 -> write-protected */
2768 mpd.hd.dev_par = (ha->hdr[t].devtype&2) ? 0x80:0;
2769 mpd.hd.bd_length = sizeof(mpd.bd);
2770 mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
2771 mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
2772 mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
2773 gdth_copy_internal_data(hanum,scp,(char*)&mpd,sizeof(gdth_modep_data));
2774 break;
2776 case READ_CAPACITY:
2777 TRACE2(("Read capacity hdrive %d\n",t));
/* drives >2TB saturate the 32-bit field, signalling READ_CAPACITY(16) */
2778 if (ha->hdr[t].size > (ulong64)0xffffffff)
2779 rdc.last_block_no = 0xffffffff;
2780 else
2781 rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
2782 rdc.block_length = cpu_to_be32(SECTOR_SIZE);
2783 gdth_copy_internal_data(hanum,scp,(char*)&rdc,sizeof(gdth_rdcap_data));
2784 break;
2786 case SERVICE_ACTION_IN:
2787 if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 &&
2788 (ha->cache_feat & GDT_64BIT)) {
2789 gdth_rdcap16_data rdc16;
2791 TRACE2(("Read capacity (16) hdrive %d\n",t));
2792 rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
2793 rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
2794 gdth_copy_internal_data(hanum,scp,(char*)&rdc16,sizeof(gdth_rdcap16_data));
2795 } else {
2796 scp->result = DID_ABORT << 16;
2798 break;
2800 default:
2801 TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
2802 break;
/* first pass sets have_data_in and defers; second pass completes */
2805 if (!scp->SCp.have_data_in)
2806 scp->SCp.have_data_in++;
2807 else
2808 return 1;
2810 return 0;
/* Build a cache-service command (read/write/mount/flush/reserve/...) for
 * host drive 'hdrive' in the adapter's command buffer (ha->pccb), mapping
 * the data buffer for DMA (sg list or single page) and choosing the 32- or
 * 64-bit command layout from ha->cache_feat & GDT_64BIT.
 * Returns the allocated command index, or 0 if no index is free, an EISA
 * controller already has a command pending, or DPMEM space ran out. */
2813 static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive)
2815 register gdth_ha_str *ha;
2816 register gdth_cmd_str *cmdp;
2817 struct scatterlist *sl;
2818 ulong32 cnt, blockcnt;
2819 ulong64 no, blockno;
2820 dma_addr_t phys_addr;
2821 int i, cmd_index, read_write, sgcnt, mode64;
2822 struct page *page;
2823 ulong offset;
2825 ha = HADATA(gdth_ctr_tab[hanum]);
2826 cmdp = ha->pccb;
2827 TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
2828 scp->cmnd[0],scp->cmd_len,hdrive));
/* EISA controllers can only take one command at a time */
2830 if (ha->type==GDT_EISA && ha->cmd_cnt>0)
2831 return 0;
2833 mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE;
2834 /* test for READ_16, WRITE_16 if !mode64 ? ---
2835 not required, should not occur due to error return on
2836 READ_CAPACITY_16 */
2838 cmdp->Service = CACHESERVICE;
2839 cmdp->RequestBuffer = scp;
2840 /* search free command index */
2841 if (!(cmd_index=gdth_get_cmd_index(hanum))) {
2842 TRACE(("GDT: No free command index found\n"));
2843 return 0;
2845 /* if it's the first command, set command semaphore */
2846 if (ha->cmd_cnt == 0)
2847 gdth_set_sema0(hanum);
2849 /* fill command */
/* read_write: 0 = control cmd, 1 = write, 2 = read (used for DMA dir) */
2850 read_write = 0;
2851 if (scp->SCp.sent_command != -1)
2852 cmdp->OpCode = scp->SCp.sent_command; /* special cache cmd. */
2853 else if (scp->cmnd[0] == RESERVE)
2854 cmdp->OpCode = GDT_RESERVE_DRV;
2855 else if (scp->cmnd[0] == RELEASE)
2856 cmdp->OpCode = GDT_RELEASE_DRV;
2857 else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
2858 if (scp->cmnd[4] & 1) /* prevent ? */
2859 cmdp->OpCode = GDT_MOUNT;
2860 else if (scp->cmnd[3] & 1) /* removable drive ? */
2861 cmdp->OpCode = GDT_UNMOUNT;
2862 else
2863 cmdp->OpCode = GDT_FLUSH;
2864 } else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 ||
2865 scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16
2867 read_write = 1;
/* write-through if globally forced, or per-drive attribute + feature */
2868 if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) &&
2869 (ha->cache_feat & GDT_WR_THROUGH)))
2870 cmdp->OpCode = GDT_WRITE_THR;
2871 else
2872 cmdp->OpCode = GDT_WRITE;
2873 } else {
2874 read_write = 2;
2875 cmdp->OpCode = GDT_READ;
2878 cmdp->BoardNode = LOCALBOARD;
2879 if (mode64) {
2880 cmdp->u.cache64.DeviceNo = hdrive;
2881 cmdp->u.cache64.BlockNo = 1;
2882 cmdp->u.cache64.sg_canz = 0;
2883 } else {
2884 cmdp->u.cache.DeviceNo = hdrive;
2885 cmdp->u.cache.BlockNo = 1;
2886 cmdp->u.cache.sg_canz = 0;
/* decode LBA/count from the CDB: 16-, 10- or 6-byte variants */
2889 if (read_write) {
2890 if (scp->cmd_len == 16) {
2891 memcpy(&no, &scp->cmnd[2], sizeof(ulong64));
2892 blockno = be64_to_cpu(no);
2893 memcpy(&cnt, &scp->cmnd[10], sizeof(ulong32));
2894 blockcnt = be32_to_cpu(cnt);
2895 } else if (scp->cmd_len == 10) {
2896 memcpy(&no, &scp->cmnd[2], sizeof(ulong32));
2897 blockno = be32_to_cpu(no);
2898 memcpy(&cnt, &scp->cmnd[7], sizeof(ushort));
2899 blockcnt = be16_to_cpu(cnt);
2900 } else {
/* 6-byte CDB: 21-bit LBA, count 0 means 256 blocks */
2901 memcpy(&no, &scp->cmnd[0], sizeof(ulong32));
2902 blockno = be32_to_cpu(no) & 0x001fffffUL;
2903 blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
2905 if (mode64) {
2906 cmdp->u.cache64.BlockNo = blockno;
2907 cmdp->u.cache64.BlockCnt = blockcnt;
2908 } else {
2909 cmdp->u.cache.BlockNo = (ulong32)blockno;
2910 cmdp->u.cache.BlockCnt = blockcnt;
/* map the data buffer for DMA: scatter-gather case first */
2913 if (scp->use_sg) {
2914 sl = (struct scatterlist *)scp->request_buffer;
2915 sgcnt = scp->use_sg;
2916 scp->SCp.Status = GDTH_MAP_SG;
2917 scp->SCp.Message = (read_write == 1 ?
2918 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
2919 sgcnt = pci_map_sg(ha->pdev,sl,scp->use_sg,scp->SCp.Message);
2920 if (mode64) {
/* DestAddr == -1 marks sg mode to the firmware */
2921 cmdp->u.cache64.DestAddr= (ulong64)-1;
2922 cmdp->u.cache64.sg_canz = sgcnt;
2923 for (i=0; i<sgcnt; ++i,++sl) {
2924 cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
2925 #ifdef GDTH_DMA_STATISTICS
2926 if (cmdp->u.cache64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
2927 ha->dma64_cnt++;
2928 else
2929 ha->dma32_cnt++;
2930 #endif
2931 cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
2933 } else {
2934 cmdp->u.cache.DestAddr= 0xffffffff;
2935 cmdp->u.cache.sg_canz = sgcnt;
2936 for (i=0; i<sgcnt; ++i,++sl) {
2937 cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
2938 #ifdef GDTH_DMA_STATISTICS
2939 ha->dma32_cnt++;
2940 #endif
2941 cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
2945 #ifdef GDTH_STATISTICS
2946 if (max_sg < (ulong32)sgcnt) {
2947 max_sg = (ulong32)sgcnt;
2948 TRACE3(("GDT: max_sg = %d\n",max_sg));
2950 #endif
/* single flat buffer: map one page region */
2952 } else if (scp->request_bufflen) {
2953 scp->SCp.Status = GDTH_MAP_SINGLE;
2954 scp->SCp.Message = (read_write == 1 ?
2955 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
2956 page = virt_to_page(scp->request_buffer);
2957 offset = (ulong)scp->request_buffer & ~PAGE_MASK;
2958 phys_addr = pci_map_page(ha->pdev,page,offset,
2959 scp->request_bufflen,scp->SCp.Message);
2960 scp->SCp.dma_handle = phys_addr;
2961 if (mode64) {
2962 if (ha->cache_feat & SCATTER_GATHER) {
2963 cmdp->u.cache64.DestAddr = (ulong64)-1;
2964 cmdp->u.cache64.sg_canz = 1;
2965 cmdp->u.cache64.sg_lst[0].sg_ptr = phys_addr;
2966 cmdp->u.cache64.sg_lst[0].sg_len = scp->request_bufflen;
2967 cmdp->u.cache64.sg_lst[1].sg_len = 0;
2968 } else {
2969 cmdp->u.cache64.DestAddr = phys_addr;
2970 cmdp->u.cache64.sg_canz= 0;
2972 } else {
2973 if (ha->cache_feat & SCATTER_GATHER) {
2974 cmdp->u.cache.DestAddr = 0xffffffff;
2975 cmdp->u.cache.sg_canz = 1;
2976 cmdp->u.cache.sg_lst[0].sg_ptr = phys_addr;
2977 cmdp->u.cache.sg_lst[0].sg_len = scp->request_bufflen;
2978 cmdp->u.cache.sg_lst[1].sg_len = 0;
2979 } else {
2980 cmdp->u.cache.DestAddr = phys_addr;
2981 cmdp->u.cache.sg_canz= 0;
2986 /* evaluate command size, check space */
2987 if (mode64) {
2988 TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
2989 cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz,
2990 cmdp->u.cache64.sg_lst[0].sg_ptr,
2991 cmdp->u.cache64.sg_lst[0].sg_len));
2992 TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
2993 cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
2994 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
2995 (ushort)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
2996 } else {
2997 TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
2998 cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
2999 cmdp->u.cache.sg_lst[0].sg_ptr,
3000 cmdp->u.cache.sg_lst[0].sg_len));
3001 TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
3002 cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
3003 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
3004 (ushort)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
/* pad command length to a 4-byte boundary */
3006 if (ha->cmd_len & 3)
3007 ha->cmd_len += (4 - (ha->cmd_len & 3));
/* batched commands must still fit into the DPMEM window; on overflow
 * release the command index and report failure */
3009 if (ha->cmd_cnt > 0) {
3010 if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
3011 ha->ic_all_size) {
3012 TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
3013 ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
3014 return 0;
3018 /* copy command */
3019 gdth_copy_command(hanum);
3020 return cmd_index;
/*
 * gdth_fill_raw_cmd() - build a SCSIRAWSERVICE command for the firmware.
 *
 * hanum: controller index into gdth_ctr_tab
 * scp:   mid-layer SCSI command to pass through to a raw device
 * b:     channel (bus) number the command targets
 *
 * Returns the firmware command index on success, 0 on failure
 * (no free index, EISA single-command restriction, or DPMEM overflow).
 *
 * Two paths: if scp->SCp.sent_command != -1 the command is a driver-internal
 * "special" raw command (no data/sense mapping); otherwise a full passthrough
 * is built, including PCI DMA mapping of the sense buffer and of the data
 * (scatter-gather list or single buffer). The raw64 variant is used when the
 * firmware advertises GDT_64BIT in ha->raw_feat.
 *
 * NOTE(review): lines containing only braces were lost in this extract;
 * the control structure below relies on the original file's bracing.
 */
3023 static int gdth_fill_raw_cmd(int hanum,Scsi_Cmnd *scp,unchar b)
3025 register gdth_ha_str *ha;
3026 register gdth_cmd_str *cmdp;
3027 struct scatterlist *sl;
3028 ushort i;
3029 dma_addr_t phys_addr, sense_paddr;
3030 int cmd_index, sgcnt, mode64;
3031 unchar t,l;
3032 struct page *page;
3033 ulong offset;
3035 ha = HADATA(gdth_ctr_tab[hanum]);
3036 t = scp->device->id;
3037 l = scp->device->lun;
3038 cmdp = ha->pccb;
3039 TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
3040 scp->cmnd[0],b,t,l));
/* EISA boards can only take one command at a time */
3042 if (ha->type==GDT_EISA && ha->cmd_cnt>0)
3043 return 0;
/* 64-bit SG descriptors only if the raw service advertises GDT_64BIT */
3045 mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;
3047 cmdp->Service = SCSIRAWSERVICE;
3048 cmdp->RequestBuffer = scp;
3049 /* search free command index */
3050 if (!(cmd_index=gdth_get_cmd_index(hanum))) {
3051 TRACE(("GDT: No free command index found\n"));
3052 return 0;
3054 /* if it's the first command, set command semaphore */
3055 if (ha->cmd_cnt == 0)
3056 gdth_set_sema0(hanum);
3058 /* fill command */
3059 if (scp->SCp.sent_command != -1) {
/* driver-internal special raw command: opcode and direction come
 * from SCp, no data or sense buffer mapping is needed */
3060 cmdp->OpCode = scp->SCp.sent_command; /* special raw cmd. */
3061 cmdp->BoardNode = LOCALBOARD;
3062 if (mode64) {
3063 cmdp->u.raw64.direction = (scp->SCp.phase >> 8);
3064 TRACE2(("special raw cmd 0x%x param 0x%x\n",
3065 cmdp->OpCode, cmdp->u.raw64.direction));
3066 /* evaluate command size */
3067 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst);
3068 } else {
3069 cmdp->u.raw.direction = (scp->SCp.phase >> 8);
3070 TRACE2(("special raw cmd 0x%x param 0x%x\n",
3071 cmdp->OpCode, cmdp->u.raw.direction));
3072 /* evaluate command size */
3073 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst);
3076 } else {
/* passthrough: DMA-map 16 bytes of the sense buffer so the firmware
 * can deposit sense data directly; the bus address is stashed in
 * SCp.buffer (low 32 bits) and host_scribble (high 32 bits) for
 * gdth_sync_event() to unmap on completion */
3077 page = virt_to_page(scp->sense_buffer);
3078 offset = (ulong)scp->sense_buffer & ~PAGE_MASK;
3079 sense_paddr = pci_map_page(ha->pdev,page,offset,
3080 16,PCI_DMA_FROMDEVICE);
3081 *(ulong32 *)&scp->SCp.buffer = (ulong32)sense_paddr;
3082 /* high part, if 64bit */
3083 *(ulong32 *)&scp->host_scribble = (ulong32)((ulong64)sense_paddr >> 32);
3084 cmdp->OpCode = GDT_WRITE; /* always */
3085 cmdp->BoardNode = LOCALBOARD;
3086 if (mode64) {
3087 cmdp->u.raw64.reserved = 0;
3088 cmdp->u.raw64.mdisc_time = 0;
3089 cmdp->u.raw64.mcon_time = 0;
3090 cmdp->u.raw64.clen = scp->cmd_len;
3091 cmdp->u.raw64.target = t;
3092 cmdp->u.raw64.lun = l;
3093 cmdp->u.raw64.bus = b;
3094 cmdp->u.raw64.priority = 0;
3095 cmdp->u.raw64.sdlen = scp->request_bufflen;
3096 cmdp->u.raw64.sense_len = 16;
3097 cmdp->u.raw64.sense_data = sense_paddr;
/* direction derived from a per-opcode lookup table */
3098 cmdp->u.raw64.direction =
3099 gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
3100 memcpy(cmdp->u.raw64.cmd,scp->cmnd,16);
3101 cmdp->u.raw64.sg_ranz = 0;
3102 } else {
3103 cmdp->u.raw.reserved = 0;
3104 cmdp->u.raw.mdisc_time = 0;
3105 cmdp->u.raw.mcon_time = 0;
3106 cmdp->u.raw.clen = scp->cmd_len;
3107 cmdp->u.raw.target = t;
3108 cmdp->u.raw.lun = l;
3109 cmdp->u.raw.bus = b;
3110 cmdp->u.raw.priority = 0;
3111 cmdp->u.raw.link_p = 0;
3112 cmdp->u.raw.sdlen = scp->request_bufflen;
3113 cmdp->u.raw.sense_len = 16;
3114 cmdp->u.raw.sense_data = sense_paddr;
3115 cmdp->u.raw.direction =
3116 gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
/* 32-bit raw command block only holds a 12-byte CDB */
3117 memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
3118 cmdp->u.raw.sg_ranz = 0;
/* data buffer: scatter-gather list ... */
3121 if (scp->use_sg) {
3122 sl = (struct scatterlist *)scp->request_buffer;
3123 sgcnt = scp->use_sg;
3124 scp->SCp.Status = GDTH_MAP_SG;
3125 scp->SCp.Message = PCI_DMA_BIDIRECTIONAL;
/* pci_map_sg may coalesce entries; use its return as the SG count */
3126 sgcnt = pci_map_sg(ha->pdev,sl,scp->use_sg,scp->SCp.Message);
3127 if (mode64) {
/* sdata == -1 signals the firmware that an SG list follows */
3128 cmdp->u.raw64.sdata = (ulong64)-1;
3129 cmdp->u.raw64.sg_ranz = sgcnt;
3130 for (i=0; i<sgcnt; ++i,++sl) {
3131 cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
3132 #ifdef GDTH_DMA_STATISTICS
3133 if (cmdp->u.raw64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
3134 ha->dma64_cnt++;
3135 else
3136 ha->dma32_cnt++;
3137 #endif
3138 cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
3140 } else {
3141 cmdp->u.raw.sdata = 0xffffffff;
3142 cmdp->u.raw.sg_ranz = sgcnt;
3143 for (i=0; i<sgcnt; ++i,++sl) {
3144 cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
3145 #ifdef GDTH_DMA_STATISTICS
3146 ha->dma32_cnt++;
3147 #endif
3148 cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
3152 #ifdef GDTH_STATISTICS
3153 if (max_sg < sgcnt) {
3154 max_sg = sgcnt;
3155 TRACE3(("GDT: max_sg = %d\n",sgcnt));
3157 #endif
/* ... or a single contiguous buffer */
3159 } else if (scp->request_bufflen) {
3160 scp->SCp.Status = GDTH_MAP_SINGLE;
3161 scp->SCp.Message = PCI_DMA_BIDIRECTIONAL;
3162 page = virt_to_page(scp->request_buffer);
3163 offset = (ulong)scp->request_buffer & ~PAGE_MASK;
3164 phys_addr = pci_map_page(ha->pdev,page,offset,
3165 scp->request_bufflen,scp->SCp.Message);
3166 scp->SCp.dma_handle = phys_addr;
3168 if (mode64) {
3169 if (ha->raw_feat & SCATTER_GATHER) {
/* firmware supports SG: describe the single buffer as a 1-entry list */
3170 cmdp->u.raw64.sdata = (ulong64)-1;
3171 cmdp->u.raw64.sg_ranz= 1;
3172 cmdp->u.raw64.sg_lst[0].sg_ptr = phys_addr;
3173 cmdp->u.raw64.sg_lst[0].sg_len = scp->request_bufflen;
3174 cmdp->u.raw64.sg_lst[1].sg_len = 0;
3175 } else {
3176 cmdp->u.raw64.sdata = phys_addr;
3177 cmdp->u.raw64.sg_ranz= 0;
3179 } else {
3180 if (ha->raw_feat & SCATTER_GATHER) {
3181 cmdp->u.raw.sdata = 0xffffffff;
3182 cmdp->u.raw.sg_ranz= 1;
3183 cmdp->u.raw.sg_lst[0].sg_ptr = phys_addr;
3184 cmdp->u.raw.sg_lst[0].sg_len = scp->request_bufflen;
3185 cmdp->u.raw.sg_lst[1].sg_len = 0;
3186 } else {
3187 cmdp->u.raw.sdata = phys_addr;
3188 cmdp->u.raw.sg_ranz= 0;
3192 if (mode64) {
3193 TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
3194 cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz,
3195 cmdp->u.raw64.sg_lst[0].sg_ptr,
3196 cmdp->u.raw64.sg_lst[0].sg_len));
3197 /* evaluate command size */
3198 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
3199 (ushort)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
3200 } else {
3201 TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
3202 cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
3203 cmdp->u.raw.sg_lst[0].sg_ptr,
3204 cmdp->u.raw.sg_lst[0].sg_len));
3205 /* evaluate command size */
3206 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
3207 (ushort)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
3210 /* check space */
/* round command length up to a 4-byte boundary */
3211 if (ha->cmd_len & 3)
3212 ha->cmd_len += (4 - (ha->cmd_len & 3));
3214 if (ha->cmd_cnt > 0) {
/* would overflow the controller's DPMEM window: release the index */
3215 if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
3216 ha->ic_all_size) {
3217 TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
3218 ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
3219 return 0;
3223 /* copy command */
3224 gdth_copy_command(hanum);
3225 return cmd_index;
/*
 * gdth_special_cmd() - submit a pre-built firmware command.
 *
 * The caller has already placed a complete gdth_cmd_str in
 * scp->request_buffer (e.g. from the ioctl path); this copies it into the
 * controller command block, computes the wire length for the known command
 * classes (IOCTL, cache service, raw service), and queues it.
 *
 * Returns the firmware command index on success, 0 on failure.
 */
3228 static int gdth_special_cmd(int hanum,Scsi_Cmnd *scp)
3230 register gdth_ha_str *ha;
3231 register gdth_cmd_str *cmdp;
3232 int cmd_index;
3234 ha = HADATA(gdth_ctr_tab[hanum]);
3235 cmdp= ha->pccb;
3236 TRACE2(("gdth_special_cmd(): "));
/* EISA boards can only take one command at a time */
3238 if (ha->type==GDT_EISA && ha->cmd_cnt>0)
3239 return 0;
/* the caller-supplied command is taken verbatim */
3241 memcpy( cmdp, scp->request_buffer, sizeof(gdth_cmd_str));
3242 cmdp->RequestBuffer = scp;
3244 /* search free command index */
3245 if (!(cmd_index=gdth_get_cmd_index(hanum))) {
3246 TRACE(("GDT: No free command index found\n"));
3247 return 0;
3250 /* if it's the first command, set command semaphore */
3251 if (ha->cmd_cnt == 0)
3252 gdth_set_sema0(hanum);
3254 /* evaluate command size, check space */
3255 if (cmdp->OpCode == GDT_IOCTL) {
3256 TRACE2(("IOCTL\n"));
3257 ha->cmd_len =
3258 GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(ulong64);
3259 } else if (cmdp->Service == CACHESERVICE) {
3260 TRACE2(("cache command %d\n",cmdp->OpCode));
/* one SG descriptor is always sent, sized per 64-bit capability */
3261 if (ha->cache_feat & GDT_64BIT)
3262 ha->cmd_len =
3263 GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str);
3264 else
3265 ha->cmd_len =
3266 GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
3267 } else if (cmdp->Service == SCSIRAWSERVICE) {
3268 TRACE2(("raw command %d\n",cmdp->OpCode));
3269 if (ha->raw_feat & GDT_64BIT)
3270 ha->cmd_len =
3271 GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str);
3272 else
3273 ha->cmd_len =
3274 GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
/* round command length up to a 4-byte boundary */
3277 if (ha->cmd_len & 3)
3278 ha->cmd_len += (4 - (ha->cmd_len & 3));
3280 if (ha->cmd_cnt > 0) {
/* would overflow the controller's DPMEM window: release the index */
3281 if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
3282 ha->ic_all_size) {
3283 TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
3284 ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
3285 return 0;
3289 /* copy command */
3290 gdth_copy_command(hanum);
3291 return cmd_index;
3295 /* Controller event handling functions */
/*
 * gdth_store_event() - record a controller event in the global ring buffer.
 *
 * ha:     adapter the event came from (unused here except by convention;
 *         the buffer ebuffer[] and indices eoldidx/elastidx are global)
 * source: event source class (ES_*); 0 means "no event" and is rejected
 * idx:    source-specific event index
 * evt:    event payload (binary eu data or a text event_string)
 *
 * If the newest buffered entry has the same source/idx/payload, it is
 * deduplicated: only its last_stamp and same_count are updated. Otherwise
 * a new slot is taken, advancing elastidx (and pushing eoldidx forward
 * when the ring wraps and overtakes the oldest entry).
 *
 * Returns a pointer to the stored/updated entry, or NULL for source == 0.
 * Caller must provide locking; this function takes no lock itself.
 */
3296 static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source,
3297 ushort idx, gdth_evt_data *evt)
3299 gdth_evt_str *e;
3300 struct timeval tv;
3302 /* no GDTH_LOCK_HA() ! */
3303 TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
3304 if (source == 0) /* no source -> no event */
3305 return NULL;
/* duplicate of the most recent entry? binary payloads are compared with
 * memcmp over evt->size, text events with strcmp */
3307 if (ebuffer[elastidx].event_source == source &&
3308 ebuffer[elastidx].event_idx == idx &&
3309 ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
3310 !memcmp((char *)&ebuffer[elastidx].event_data.eu,
3311 (char *)&evt->eu, evt->size)) ||
3312 (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
3313 !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
3314 (char *)&evt->event_string)))) {
3315 e = &ebuffer[elastidx];
3316 do_gettimeofday(&tv);
3317 e->last_stamp = tv.tv_sec;
3318 ++e->same_count;
3319 } else {
3320 if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
3321 ++elastidx;
3322 if (elastidx == MAX_EVENTS)
3323 elastidx = 0;
3324 if (elastidx == eoldidx) { /* reached mark ? */
/* ring is full: drop the oldest entry */
3325 ++eoldidx;
3326 if (eoldidx == MAX_EVENTS)
3327 eoldidx = 0;
3330 e = &ebuffer[elastidx];
3331 e->event_source = source;
3332 e->event_idx = idx;
3333 do_gettimeofday(&tv);
3334 e->first_stamp = e->last_stamp = tv.tv_sec;
3335 e->same_count = 1;
3336 e->event_data = *evt;
/* application-delivery bitmask starts empty */
3337 e->application = 0;
3339 return e;
/*
 * gdth_read_event() - fetch one buffered event by iteration handle.
 *
 * ha:     adapter whose smp_lock guards the (global) event buffer
 * handle: previous return value to continue iterating, or -1 to start
 *         at the oldest buffered event
 * estr:   out - receives a copy of the event; estr->event_source is set
 *         to 0 when the slot at the handle is empty
 *
 * Returns the handle of the NEXT event to read, or -1 when the entry
 * just returned was the newest one (iteration finished). An out-of-range
 * handle (>= MAX_EVENTS) is returned unchanged with no copy performed.
 */
3342 static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
3344 gdth_evt_str *e;
3345 int eindex;
3346 ulong flags;
3348 TRACE2(("gdth_read_event() handle %d\n", handle));
3349 spin_lock_irqsave(&ha->smp_lock, flags);
3350 if (handle == -1)
3351 eindex = eoldidx;
3352 else
3353 eindex = handle;
/* default: report "no event" unless a populated slot is copied below */
3354 estr->event_source = 0;
3356 if (eindex >= MAX_EVENTS) {
3357 spin_unlock_irqrestore(&ha->smp_lock, flags);
3358 return eindex;
3360 e = &ebuffer[eindex];
3361 if (e->event_source != 0) {
3362 if (eindex != elastidx) {
3363 if (++eindex == MAX_EVENTS)
3364 eindex = 0;
3365 } else {
/* this was the newest entry: signal end of iteration */
3366 eindex = -1;
3368 memcpy(estr, e, sizeof(gdth_evt_str));
3370 spin_unlock_irqrestore(&ha->smp_lock, flags);
3371 return eindex;
/*
 * gdth_readapp_event() - deliver the next unseen event to an application.
 *
 * ha:          adapter whose smp_lock guards the (global) event buffer
 * application: bitmask identifying the requesting application
 * estr:        out - copy of the first event not yet delivered to this
 *              application; estr->event_source is 0 when none is pending
 *
 * Scans from the oldest entry toward the newest; the first entry whose
 * application bitmask does not yet include this application is marked
 * delivered (bit OR-ed in) and copied out.
 */
3374 static void gdth_readapp_event(gdth_ha_str *ha,
3375 unchar application, gdth_evt_str *estr)
3377 gdth_evt_str *e;
3378 int eindex;
3379 ulong flags;
3380 unchar found = FALSE;
3382 TRACE2(("gdth_readapp_event() app. %d\n", application));
3383 spin_lock_irqsave(&ha->smp_lock, flags);
3384 eindex = eoldidx;
3385 for (;;) {
3386 e = &ebuffer[eindex];
/* empty slot terminates the populated region of the ring */
3387 if (e->event_source == 0)
3388 break;
3389 if ((e->application & application) == 0) {
/* not yet delivered to this application: mark and return it */
3390 e->application |= application;
3391 found = TRUE;
3392 break;
3394 if (eindex == elastidx)
3395 break;
3396 if (++eindex == MAX_EVENTS)
3397 eindex = 0;
3399 if (found)
3400 memcpy(estr, e, sizeof(gdth_evt_str));
3401 else
3402 estr->event_source = 0;
3403 spin_unlock_irqrestore(&ha->smp_lock, flags);
/*
 * gdth_clear_events() - reset the global event ring buffer.
 *
 * Rewinds both indices to slot 0 and marks that slot empty
 * (event_source == 0 denotes a free entry). No locking here;
 * callers are responsible for serialization.
 */
3406 static void gdth_clear_events(void)
3408 TRACE(("gdth_clear_events()"));
3410 eoldidx = elastidx = 0;
3411 ebuffer[0].event_source = 0;
3415 /* SCSI interface functions */
/*
 * gdth_interrupt() - top-level interrupt handler for all controller types.
 *
 * irq:    interrupt line that fired
 * dev_id: the gdth_ha_str registered with request_irq (used for its lock)
 *
 * Flow: read the status/index registers for the board type (EISA, ISA,
 * PCI, PCINEW, PCIMPR), acknowledge the interrupt, then dispatch on the
 * returned index: ASYNCINDEX -> gdth_async_event(), SPEZINDEX -> log a
 * driver event, otherwise complete the command stored in ha->cmd_tab via
 * gdth_sync_event() (which may requeue the command or finish it with
 * scsi_done). With INT_COAL the PCIMPR path can loop over a buffer of
 * coalesced statuses before acknowledging.
 *
 * In polling mode (gdth_polling) the lock is not taken: gdth_wait() owns
 * it; an interrupt arriving while polling but not from gdth_wait() is
 * acknowledged and ignored.
 *
 * NOTE(review): lines containing only braces were lost in this extract;
 * the control structure below relies on the original file's bracing.
 */
3417 static irqreturn_t gdth_interrupt(int irq,void *dev_id)
3419 gdth_ha_str *ha2 = (gdth_ha_str *)dev_id;
3420 register gdth_ha_str *ha;
3421 gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
3422 gdt6_dpram_str __iomem *dp6_ptr;
3423 gdt2_dpram_str __iomem *dp2_ptr;
3424 Scsi_Cmnd *scp;
3425 int hanum, rval, i;
3426 unchar IStatus;
3427 ushort Service;
3428 ulong flags = 0;
3429 #ifdef INT_COAL
3430 int coalesced = FALSE;
3431 int next = FALSE;
3432 gdth_coal_status *pcs = NULL;
3433 int act_int_coal = 0;
3434 #endif
3436 TRACE(("gdth_interrupt() IRQ %d\n",irq));
3438 /* if polling and not from gdth_wait() -> return */
3439 if (gdth_polling) {
3440 if (!gdth_from_wait) {
3441 return IRQ_HANDLED;
3445 if (!gdth_polling)
3446 spin_lock_irqsave(&ha2->smp_lock, flags);
3447 wait_index = 0;
3449 /* search controller */
3450 if ((hanum = gdth_get_status(&IStatus,irq)) == -1) {
3451 /* spurious interrupt */
3452 if (!gdth_polling)
3453 spin_unlock_irqrestore(&ha2->smp_lock, flags);
3454 return IRQ_HANDLED;
3456 ha = HADATA(gdth_ctr_tab[hanum]);
3458 #ifdef GDTH_STATISTICS
3459 ++act_ints;
3460 #endif
3462 #ifdef INT_COAL
3463 /* See if the fw is returning coalesced status */
3464 if (IStatus == COALINDEX) {
3465 /* Coalesced status. Setup the initial status
3466 buffer pointer and flags */
3467 pcs = ha->coal_stat;
3468 coalesced = TRUE;
3469 next = TRUE;
3472 do {
3473 if (coalesced) {
3474 /* For coalesced requests all status
3475 information is found in the status buffer */
3476 IStatus = (unchar)(pcs->status & 0xff);
3478 #endif
/* per-board-type status read + interrupt acknowledge.
 * Bit 7 of IStatus flags an error; the detailed status word is then
 * read from the board, otherwise S_OK is assumed. */
3480 if (ha->type == GDT_EISA) {
3481 if (IStatus & 0x80) { /* error flag */
3482 IStatus &= ~0x80;
3483 ha->status = inw(ha->bmic + MAILBOXREG+8);
3484 TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
3485 } else /* no error */
3486 ha->status = S_OK;
3487 ha->info = inl(ha->bmic + MAILBOXREG+12);
3488 ha->service = inw(ha->bmic + MAILBOXREG+10);
3489 ha->info2 = inl(ha->bmic + MAILBOXREG+4);
3491 outb(0xff, ha->bmic + EDOORREG); /* acknowledge interrupt */
3492 outb(0x00, ha->bmic + SEMA1REG); /* reset status semaphore */
3493 } else if (ha->type == GDT_ISA) {
3494 dp2_ptr = ha->brd;
3495 if (IStatus & 0x80) { /* error flag */
3496 IStatus &= ~0x80;
3497 ha->status = gdth_readw(&dp2_ptr->u.ic.Status);
3498 TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
3499 } else /* no error */
3500 ha->status = S_OK;
3501 ha->info = gdth_readl(&dp2_ptr->u.ic.Info[0]);
3502 ha->service = gdth_readw(&dp2_ptr->u.ic.Service);
3503 ha->info2 = gdth_readl(&dp2_ptr->u.ic.Info[1]);
3505 gdth_writeb(0xff, &dp2_ptr->io.irqdel); /* acknowledge interrupt */
3506 gdth_writeb(0, &dp2_ptr->u.ic.Cmd_Index);/* reset command index */
3507 gdth_writeb(0, &dp2_ptr->io.Sema1); /* reset status semaphore */
3508 } else if (ha->type == GDT_PCI) {
3509 dp6_ptr = ha->brd;
3510 if (IStatus & 0x80) { /* error flag */
3511 IStatus &= ~0x80;
3512 ha->status = gdth_readw(&dp6_ptr->u.ic.Status);
3513 TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
3514 } else /* no error */
3515 ha->status = S_OK;
3516 ha->info = gdth_readl(&dp6_ptr->u.ic.Info[0]);
3517 ha->service = gdth_readw(&dp6_ptr->u.ic.Service);
3518 ha->info2 = gdth_readl(&dp6_ptr->u.ic.Info[1]);
3520 gdth_writeb(0xff, &dp6_ptr->io.irqdel); /* acknowledge interrupt */
3521 gdth_writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */
3522 gdth_writeb(0, &dp6_ptr->io.Sema1); /* reset status semaphore */
3523 } else if (ha->type == GDT_PCINEW) {
3524 if (IStatus & 0x80) { /* error flag */
3525 IStatus &= ~0x80;
3526 ha->status = inw(PTR2USHORT(&ha->plx->status));
3527 TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
3528 } else
3529 ha->status = S_OK;
3530 ha->info = inl(PTR2USHORT(&ha->plx->info[0]));
3531 ha->service = inw(PTR2USHORT(&ha->plx->service));
3532 ha->info2 = inl(PTR2USHORT(&ha->plx->info[1]));
3534 outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
3535 outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
3536 } else if (ha->type == GDT_PCIMPR) {
3537 dp6m_ptr = ha->brd;
3538 if (IStatus & 0x80) { /* error flag */
3539 IStatus &= ~0x80;
3540 #ifdef INT_COAL
3541 if (coalesced)
3542 ha->status = pcs->ext_status & 0xffff;
3543 else
3544 #endif
3545 ha->status = gdth_readw(&dp6m_ptr->i960r.status);
3546 TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
3547 } else /* no error */
3548 ha->status = S_OK;
3549 #ifdef INT_COAL
3550 /* get information */
3551 if (coalesced) {
3552 ha->info = pcs->info0;
3553 ha->info2 = pcs->info1;
3554 ha->service = (pcs->ext_status >> 16) & 0xffff;
3555 } else
3556 #endif
3558 ha->info = gdth_readl(&dp6m_ptr->i960r.info[0]);
3559 ha->service = gdth_readw(&dp6m_ptr->i960r.service);
3560 ha->info2 = gdth_readl(&dp6m_ptr->i960r.info[1]);
3562 /* event string */
/* newer firmware (>= x.1a) ships an async event text + severity in
 * DPRAM; copy it out before acknowledging */
3563 if (IStatus == ASYNCINDEX) {
3564 if (ha->service != SCREENSERVICE &&
3565 (ha->fw_vers & 0xff) >= 0x1a) {
3566 ha->dvr.severity = gdth_readb
3567 (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity);
3568 for (i = 0; i < 256; ++i) {
3569 ha->dvr.event_string[i] = gdth_readb
3570 (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]);
3571 if (ha->dvr.event_string[i] == 0)
3572 break;
3576 #ifdef INT_COAL
3577 /* Make sure that non coalesced interrupts get cleared
3578 before being handled by gdth_async_event/gdth_sync_event */
3579 if (!coalesced)
3580 #endif
3582 gdth_writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
3583 gdth_writeb(0, &dp6m_ptr->i960r.sema1_reg);
3585 } else {
3586 TRACE2(("gdth_interrupt() unknown controller type\n"));
3587 if (!gdth_polling)
3588 spin_unlock_irqrestore(&ha2->smp_lock, flags);
3589 return IRQ_HANDLED;
3592 TRACE(("gdth_interrupt() index %d stat %d info %d\n",
3593 IStatus,ha->status,ha->info));
/* hand the result to a concurrent gdth_wait() poller */
3595 if (gdth_from_wait) {
3596 wait_hanum = hanum;
3597 wait_index = (int)IStatus;
3600 if (IStatus == ASYNCINDEX) {
3601 TRACE2(("gdth_interrupt() async. event\n"));
3602 gdth_async_event(hanum);
3603 if (!gdth_polling)
3604 spin_unlock_irqrestore(&ha2->smp_lock, flags);
3605 gdth_next(hanum);
3606 return IRQ_HANDLED;
3609 if (IStatus == SPEZINDEX) {
3610 TRACE2(("Service unknown or not initialized !\n"));
3611 ha->dvr.size = sizeof(ha->dvr.eu.driver);
3612 ha->dvr.eu.driver.ionode = hanum;
3613 gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr);
3614 if (!gdth_polling)
3615 spin_unlock_irqrestore(&ha2->smp_lock, flags);
3616 return IRQ_HANDLED;
/* normal completion: command indices are 2-based into cmd_tab */
3618 scp = ha->cmd_tab[IStatus-2].cmnd;
3619 Service = ha->cmd_tab[IStatus-2].service;
3620 ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND;
3621 if (scp == UNUSED_CMND) {
3622 TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
3623 ha->dvr.size = sizeof(ha->dvr.eu.driver);
3624 ha->dvr.eu.driver.ionode = hanum;
3625 ha->dvr.eu.driver.index = IStatus;
3626 gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr);
3627 if (!gdth_polling)
3628 spin_unlock_irqrestore(&ha2->smp_lock, flags);
3629 return IRQ_HANDLED;
3631 if (scp == INTERNAL_CMND) {
3632 TRACE(("gdth_interrupt() answer to internal command\n"));
3633 if (!gdth_polling)
3634 spin_unlock_irqrestore(&ha2->smp_lock, flags);
3635 return IRQ_HANDLED;
3638 TRACE(("gdth_interrupt() sync. status\n"));
/* rval: 2 = requeue for retry, 1 = complete to the SCSI mid-layer */
3639 rval = gdth_sync_event(hanum,Service,IStatus,scp);
3640 if (!gdth_polling)
3641 spin_unlock_irqrestore(&ha2->smp_lock, flags);
3642 if (rval == 2) {
3643 gdth_putq(hanum,scp,scp->SCp.this_residual);
3644 } else if (rval == 1) {
3645 scp->scsi_done(scp);
3648 #ifdef INT_COAL
3649 if (coalesced) {
3650 /* go to the next status in the status buffer */
3651 ++pcs;
3652 #ifdef GDTH_STATISTICS
3653 ++act_int_coal;
3654 if (act_int_coal > max_int_coal) {
3655 max_int_coal = act_int_coal;
3656 printk("GDT: max_int_coal = %d\n",(ushort)max_int_coal);
3658 #endif
3659 /* see if there is another status */
3660 if (pcs->status == 0)
3661 /* Stop the coalesce loop */
3662 next = FALSE;
3664 } while (next);
3666 /* coalescing only for new GDT_PCIMPR controllers available */
3667 if (ha->type == GDT_PCIMPR && coalesced) {
3668 gdth_writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
3669 gdth_writeb(0, &dp6m_ptr->i960r.sema1_reg);
3671 #endif
/* kick the request queue to issue any waiting commands */
3673 gdth_next(hanum);
3674 return IRQ_HANDLED;
/*
 * gdth_sync_event() - process the completion of a synchronous command.
 *
 * hanum:   controller index
 * service: firmware service that completed (SCREENSERVICE, CACHESERVICE,
 *          SCSIRAWSERVICE)
 * index:   firmware command index (unused in the visible body)
 * scp:     the SCSI command that completed
 *
 * Returns 0 (nothing more to do / screen service handled internally),
 * 1 (complete scp to the mid-layer) or 2 (requeue scp for retry).
 *
 * SCREENSERVICE completions print the message text and, depending on the
 * message flags, immediately issue a follow-up GDT_READ or a default
 * GDT_WRITE answer. For cache/raw completions the DMA mappings made in
 * the fill routines are undone, then status is translated into
 * scp->result, including cluster-drive mount/reserve bookkeeping and
 * retry decisions for special driver commands.
 *
 * NOTE(review): lines containing only braces were lost in this extract;
 * the control structure below relies on the original file's bracing.
 */
3677 static int gdth_sync_event(int hanum,int service,unchar index,Scsi_Cmnd *scp)
3679 register gdth_ha_str *ha;
3680 gdth_msg_str *msg;
3681 gdth_cmd_str *cmdp;
3682 unchar b, t;
3684 ha = HADATA(gdth_ctr_tab[hanum]);
3685 cmdp = ha->pccb;
3686 TRACE(("gdth_sync_event() serv %d status %d\n",
3687 service,ha->status));
3689 if (service == SCREENSERVICE) {
3690 msg = ha->pmsg;
3691 TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n",
3692 msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
/* clamp length, NUL-terminate and print the controller message */
3693 if (msg->msg_len > MSGLEN+1)
3694 msg->msg_len = MSGLEN+1;
3695 if (msg->msg_len)
3696 if (!(msg->msg_answer && msg->msg_ext)) {
3697 msg->msg_text[msg->msg_len] = '\0';
3698 printk("%s",msg->msg_text);
/* more message text pending: fetch it with a GDT_READ */
3701 if (msg->msg_ext && !msg->msg_answer) {
3702 while (gdth_test_busy(hanum))
3703 gdth_delay(0);
3704 cmdp->Service = SCREENSERVICE;
3705 cmdp->RequestBuffer = SCREEN_CMND;
3706 gdth_get_cmd_index(hanum);
3707 gdth_set_sema0(hanum);
3708 cmdp->OpCode = GDT_READ;
3709 cmdp->BoardNode = LOCALBOARD;
3710 cmdp->u.screen.reserved = 0;
3711 cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
3712 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
3713 ha->cmd_offs_dpmem = 0;
3714 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
3715 + sizeof(ulong64);
3716 ha->cmd_cnt = 0;
3717 gdth_copy_command(hanum);
3718 gdth_release_event(hanum);
3719 return 0;
3722 if (msg->msg_answer && msg->msg_alen) {
3723 /* default answers (getchar() not possible) */
3724 if (msg->msg_alen == 1) {
3725 msg->msg_alen = 0;
3726 msg->msg_len = 1;
3727 msg->msg_text[0] = 0;
3728 } else {
3729 msg->msg_alen -= 2;
3730 msg->msg_len = 2;
3731 msg->msg_text[0] = 1;
3732 msg->msg_text[1] = 0;
3734 msg->msg_ext = 0;
3735 msg->msg_answer = 0;
/* send the canned answer back with a GDT_WRITE */
3736 while (gdth_test_busy(hanum))
3737 gdth_delay(0);
3738 cmdp->Service = SCREENSERVICE;
3739 cmdp->RequestBuffer = SCREEN_CMND;
3740 gdth_get_cmd_index(hanum);
3741 gdth_set_sema0(hanum);
3742 cmdp->OpCode = GDT_WRITE;
3743 cmdp->BoardNode = LOCALBOARD;
3744 cmdp->u.screen.reserved = 0;
3745 cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
3746 cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
3747 ha->cmd_offs_dpmem = 0;
3748 ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
3749 + sizeof(ulong64);
3750 ha->cmd_cnt = 0;
3751 gdth_copy_command(hanum);
3752 gdth_release_event(hanum);
3753 return 0;
3755 printk("\n");
3757 } else {
3758 b = virt_ctr ? NUMDATA(scp->device->host)->busnum : scp->device->channel;
3759 t = scp->device->id;
/* drop the per-target outstanding-I/O count for raw-bus commands */
3760 if (scp->SCp.sent_command == -1 && b != ha->virt_bus) {
3761 ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
3763 /* cache or raw service */
3764 if (ha->status == S_BSY) {
3765 TRACE2(("Controller busy -> retry !\n"));
3766 if (scp->SCp.sent_command == GDT_MOUNT)
3767 scp->SCp.sent_command = GDT_CLUST_INFO;
3768 /* retry */
3769 return 2;
/* undo the DMA mappings made by the fill routines */
3771 if (scp->SCp.Status == GDTH_MAP_SG)
3772 pci_unmap_sg(ha->pdev,scp->request_buffer,
3773 scp->use_sg,scp->SCp.Message);
3774 else if (scp->SCp.Status == GDTH_MAP_SINGLE)
3775 pci_unmap_page(ha->pdev,scp->SCp.dma_handle,
3776 scp->request_bufflen,scp->SCp.Message);
3777 if (scp->SCp.buffer) {
/* reassemble the 64-bit sense buffer bus address stored in
 * SCp.buffer (low half) and host_scribble (high half) */
3778 dma_addr_t addr;
3779 addr = (dma_addr_t)*(ulong32 *)&scp->SCp.buffer;
3780 if (scp->host_scribble)
3781 addr += (dma_addr_t)
3782 ((ulong64)(*(ulong32 *)&scp->host_scribble) << 32);
3783 pci_unmap_page(ha->pdev,addr,16,PCI_DMA_FROMDEVICE);
3786 if (ha->status == S_OK) {
3787 scp->SCp.Status = S_OK;
3788 scp->SCp.Message = ha->info;
3789 if (scp->SCp.sent_command != -1) {
3790 TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n",
3791 scp->SCp.sent_command));
3792 /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
3793 if (scp->SCp.sent_command == GDT_CLUST_INFO) {
3794 ha->hdr[t].cluster_type = (unchar)ha->info;
3795 if (!(ha->hdr[t].cluster_type &
3796 CLUSTER_MOUNTED)) {
3797 /* NOT MOUNTED -> MOUNT */
3798 scp->SCp.sent_command = GDT_MOUNT;
3799 if (ha->hdr[t].cluster_type &
3800 CLUSTER_RESERVED) {
3801 /* cluster drive RESERVED (on the other node) */
3802 scp->SCp.phase = -2; /* reservation conflict */
3804 } else {
3805 scp->SCp.sent_command = -1;
3807 } else {
3808 if (scp->SCp.sent_command == GDT_MOUNT) {
3809 ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
3810 ha->hdr[t].media_changed = TRUE;
3811 } else if (scp->SCp.sent_command == GDT_UNMOUNT) {
3812 ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
3813 ha->hdr[t].media_changed = TRUE;
3815 scp->SCp.sent_command = -1;
3817 /* retry */
3818 scp->SCp.this_residual = HIGH_PRI;
3819 return 2;
3820 } else {
3821 /* RESERVE/RELEASE ? */
3822 if (scp->cmnd[0] == RESERVE) {
3823 ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
3824 } else if (scp->cmnd[0] == RELEASE) {
3825 ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
3827 scp->result = DID_OK << 16;
3828 scp->sense_buffer[0] = 0;
3830 } else {
/* completion with firmware error status */
3831 scp->SCp.Status = ha->status;
3832 scp->SCp.Message = ha->info;
3834 if (scp->SCp.sent_command != -1) {
3835 TRACE2(("gdth_sync_event(): special cmd 0x%x error 0x%x\n",
3836 scp->SCp.sent_command, ha->status));
3837 if (scp->SCp.sent_command == GDT_SCAN_START ||
3838 scp->SCp.sent_command == GDT_SCAN_END) {
3839 scp->SCp.sent_command = -1;
3840 /* retry */
3841 scp->SCp.this_residual = HIGH_PRI;
3842 return 2;
/* failed special command: report NOT READY sense to the mid-layer */
3844 memset((char*)scp->sense_buffer,0,16);
3845 scp->sense_buffer[0] = 0x70;
3846 scp->sense_buffer[2] = NOT_READY;
3847 scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
3848 } else if (service == CACHESERVICE) {
3849 if (ha->status == S_CACHE_UNKNOWN &&
3850 (ha->hdr[t].cluster_type &
3851 CLUSTER_RESERVE_STATE) == CLUSTER_RESERVE_STATE) {
3852 /* bus reset -> force GDT_CLUST_INFO */
3853 ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
3855 memset((char*)scp->sense_buffer,0,16);
3856 if (ha->status == (ushort)S_CACHE_RESERV) {
3857 scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
3858 } else {
3859 scp->sense_buffer[0] = 0x70;
3860 scp->sense_buffer[2] = NOT_READY;
3861 scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
/* log the error for external (non-internal) requests */
3863 if (scp->done != gdth_scsi_done) {
3864 ha->dvr.size = sizeof(ha->dvr.eu.sync);
3865 ha->dvr.eu.sync.ionode = hanum;
3866 ha->dvr.eu.sync.service = service;
3867 ha->dvr.eu.sync.status = ha->status;
3868 ha->dvr.eu.sync.info = ha->info;
3869 ha->dvr.eu.sync.hostdrive = t;
3870 if (ha->status >= 0x8000)
3871 gdth_store_event(ha, ES_SYNC, 0, &ha->dvr);
3872 else
3873 gdth_store_event(ha, ES_SYNC, service, &ha->dvr);
3875 } else {
3876 /* sense buffer filled from controller firmware (DMA) */
3877 if (ha->status != S_RAW_SCSI || ha->info >= 0x100) {
3878 scp->result = DID_BAD_TARGET << 16;
3879 } else {
3880 scp->result = (DID_OK << 16) | ha->info;
/* first pass: mark data-in done and complete; second pass: retry */
3884 if (!scp->SCp.have_data_in)
3885 scp->SCp.have_data_in++;
3886 else
3887 return 1;
3890 return 0;
3893 static char *async_cache_tab[] = {
3894 /* 0*/ "\011\000\002\002\002\004\002\006\004"
3895 "GDT HA %u, service %u, async. status %u/%lu unknown",
3896 /* 1*/ "\011\000\002\002\002\004\002\006\004"
3897 "GDT HA %u, service %u, async. status %u/%lu unknown",
3898 /* 2*/ "\005\000\002\006\004"
3899 "GDT HA %u, Host Drive %lu not ready",
3900 /* 3*/ "\005\000\002\006\004"
3901 "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
3902 /* 4*/ "\005\000\002\006\004"
3903 "GDT HA %u, mirror update on Host Drive %lu failed",
3904 /* 5*/ "\005\000\002\006\004"
3905 "GDT HA %u, Mirror Drive %lu failed",
3906 /* 6*/ "\005\000\002\006\004"
3907 "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
3908 /* 7*/ "\005\000\002\006\004"
3909 "GDT HA %u, Host Drive %lu write protected",
3910 /* 8*/ "\005\000\002\006\004"
3911 "GDT HA %u, media changed in Host Drive %lu",
3912 /* 9*/ "\005\000\002\006\004"
3913 "GDT HA %u, Host Drive %lu is offline",
3914 /*10*/ "\005\000\002\006\004"
3915 "GDT HA %u, media change of Mirror Drive %lu",
3916 /*11*/ "\005\000\002\006\004"
3917 "GDT HA %u, Mirror Drive %lu is write protected",
3918 /*12*/ "\005\000\002\006\004"
3919 "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!",
3920 /*13*/ "\007\000\002\006\002\010\002"
3921 "GDT HA %u, Array Drive %u: Cache Drive %u failed",
3922 /*14*/ "\005\000\002\006\002"
3923 "GDT HA %u, Array Drive %u: FAIL state entered",
3924 /*15*/ "\005\000\002\006\002"
3925 "GDT HA %u, Array Drive %u: error",
3926 /*16*/ "\007\000\002\006\002\010\002"
3927 "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u",
3928 /*17*/ "\005\000\002\006\002"
3929 "GDT HA %u, Array Drive %u: parity build failed",
3930 /*18*/ "\005\000\002\006\002"
3931 "GDT HA %u, Array Drive %u: drive rebuild failed",
3932 /*19*/ "\005\000\002\010\002"
3933 "GDT HA %u, Test of Hot Fix %u failed",
3934 /*20*/ "\005\000\002\006\002"
3935 "GDT HA %u, Array Drive %u: drive build finished successfully",
3936 /*21*/ "\005\000\002\006\002"
3937 "GDT HA %u, Array Drive %u: drive rebuild finished successfully",
3938 /*22*/ "\007\000\002\006\002\010\002"
3939 "GDT HA %u, Array Drive %u: Hot Fix %u activated",
3940 /*23*/ "\005\000\002\006\002"
3941 "GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error",
3942 /*24*/ "\005\000\002\010\002"
3943 "GDT HA %u, mirror update on Cache Drive %u completed",
3944 /*25*/ "\005\000\002\010\002"
3945 "GDT HA %u, mirror update on Cache Drive %lu failed",
3946 /*26*/ "\005\000\002\006\002"
3947 "GDT HA %u, Array Drive %u: drive rebuild started",
3948 /*27*/ "\005\000\002\012\001"
3949 "GDT HA %u, Fault bus %u: SHELF OK detected",
3950 /*28*/ "\005\000\002\012\001"
3951 "GDT HA %u, Fault bus %u: SHELF not OK detected",
3952 /*29*/ "\007\000\002\012\001\013\001"
3953 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started",
3954 /*30*/ "\007\000\002\012\001\013\001"
3955 "GDT HA %u, Fault bus %u, ID %u: new disk detected",
3956 /*31*/ "\007\000\002\012\001\013\001"
3957 "GDT HA %u, Fault bus %u, ID %u: old disk detected",
3958 /*32*/ "\007\000\002\012\001\013\001"
3959 "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is invalid",
3960 /*33*/ "\007\000\002\012\001\013\001"
3961 "GDT HA %u, Fault bus %u, ID %u: invalid device detected",
3962 /*34*/ "\011\000\002\012\001\013\001\006\004"
3963 "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)",
3964 /*35*/ "\007\000\002\012\001\013\001"
3965 "GDT HA %u, Fault bus %u, ID %u: disk write protected",
3966 /*36*/ "\007\000\002\012\001\013\001"
3967 "GDT HA %u, Fault bus %u, ID %u: disk not available",
3968 /*37*/ "\007\000\002\012\001\006\004"
3969 "GDT HA %u, Fault bus %u: swap detected (%lu)",
3970 /*38*/ "\007\000\002\012\001\013\001"
3971 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully",
3972 /*39*/ "\007\000\002\012\001\013\001"
3973 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug",
3974 /*40*/ "\007\000\002\012\001\013\001"
3975 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted",
3976 /*41*/ "\007\000\002\012\001\013\001"
3977 "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started",
3978 /*42*/ "\005\000\002\006\002"
3979 "GDT HA %u, Array Drive %u: drive build started",
3980 /*43*/ "\003\000\002"
3981 "GDT HA %u, DRAM parity error detected",
3982 /*44*/ "\005\000\002\006\002"
3983 "GDT HA %u, Mirror Drive %u: update started",
3984 /*45*/ "\007\000\002\006\002\010\002"
3985 "GDT HA %u, Mirror Drive %u: Hot Fix %u activated",
3986 /*46*/ "\005\000\002\006\002"
3987 "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available",
3988 /*47*/ "\005\000\002\006\002"
3989 "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available",
3990 /*48*/ "\005\000\002\006\002"
3991 "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available",
3992 /*49*/ "\005\000\002\006\002"
3993 "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available",
3994 /*50*/ "\007\000\002\012\001\013\001"
3995 "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received",
3996 /*51*/ "\005\000\002\006\002"
3997 "GDT HA %u, Array Drive %u: expand started",
3998 /*52*/ "\005\000\002\006\002"
3999 "GDT HA %u, Array Drive %u: expand finished successfully",
4000 /*53*/ "\005\000\002\006\002"
4001 "GDT HA %u, Array Drive %u: expand failed",
4002 /*54*/ "\003\000\002"
4003 "GDT HA %u, CPU temperature critical",
4004 /*55*/ "\003\000\002"
4005 "GDT HA %u, CPU temperature OK",
4006 /*56*/ "\005\000\002\006\004"
4007 "GDT HA %u, Host drive %lu created",
4008 /*57*/ "\005\000\002\006\002"
4009 "GDT HA %u, Array Drive %u: expand restarted",
4010 /*58*/ "\005\000\002\006\002"
4011 "GDT HA %u, Array Drive %u: expand stopped",
4012 /*59*/ "\005\000\002\010\002"
4013 "GDT HA %u, Mirror Drive %u: drive build quited",
4014 /*60*/ "\005\000\002\006\002"
4015 "GDT HA %u, Array Drive %u: parity build quited",
4016 /*61*/ "\005\000\002\006\002"
4017 "GDT HA %u, Array Drive %u: drive rebuild quited",
4018 /*62*/ "\005\000\002\006\002"
4019 "GDT HA %u, Array Drive %u: parity verify started",
4020 /*63*/ "\005\000\002\006\002"
4021 "GDT HA %u, Array Drive %u: parity verify done",
4022 /*64*/ "\005\000\002\006\002"
4023 "GDT HA %u, Array Drive %u: parity verify failed",
4024 /*65*/ "\005\000\002\006\002"
4025 "GDT HA %u, Array Drive %u: parity error detected",
4026 /*66*/ "\005\000\002\006\002"
4027 "GDT HA %u, Array Drive %u: parity verify quited",
4028 /*67*/ "\005\000\002\006\002"
4029 "GDT HA %u, Host Drive %u reserved",
4030 /*68*/ "\005\000\002\006\002"
4031 "GDT HA %u, Host Drive %u mounted and released",
4032 /*69*/ "\005\000\002\006\002"
4033 "GDT HA %u, Host Drive %u released",
4034 /*70*/ "\003\000\002"
4035 "GDT HA %u, DRAM error detected and corrected with ECC",
4036 /*71*/ "\003\000\002"
4037 "GDT HA %u, Uncorrectable DRAM error detected with ECC",
4038 /*72*/ "\011\000\002\012\001\013\001\014\001"
4039 "GDT HA %u, SCSI bus %u, ID %u, LUN %u: reassigning block",
4040 /*73*/ "\005\000\002\006\002"
4041 "GDT HA %u, Host drive %u resetted locally",
4042 /*74*/ "\005\000\002\006\002"
4043 "GDT HA %u, Host drive %u resetted remotely",
4044 /*75*/ "\003\000\002"
4045 "GDT HA %u, async. status 75 unknown",
/* Handle an asynchronous (unsolicited) event from controller 'hanum'.
 * Called from the interrupt path after ha->service/status/info have been
 * latched from the controller mailbox.  Screen-service MSG_REQUESTs get a
 * follow-up GDT_READ command posted to fetch the message text; all other
 * events are recorded in the driver event buffer and logged.
 * Always returns 1.
 */
static int gdth_async_event(int hanum)
{
    gdth_ha_str *ha;
    gdth_cmd_str *cmdp;
    int cmd_index;

    ha = HADATA(gdth_ctr_tab[hanum]);
    cmdp= ha->pccb;
    TRACE2(("gdth_async_event() ha %d serv %d\n",
            hanum,ha->service));

    if (ha->service == SCREENSERVICE) {
        if (ha->status == MSG_REQUEST) {
            /* controller wants us to read a console message: build a
               GDT_READ screen command into the shared command buffer */
            while (gdth_test_busy(hanum))
                gdth_delay(0);
            cmdp->Service       = SCREENSERVICE;
            cmdp->RequestBuffer = SCREEN_CMND;
            cmd_index = gdth_get_cmd_index(hanum);
            gdth_set_sema0(hanum);
            cmdp->OpCode        = GDT_READ;
            cmdp->BoardNode     = LOCALBOARD;
            cmdp->u.screen.reserved  = 0;
            cmdp->u.screen.su.msg.msg_handle= MSG_INV_HANDLE;
            cmdp->u.screen.su.msg.msg_addr  = ha->msg_phys;
            ha->cmd_offs_dpmem = 0;
            /* command length only up to and including msg_addr */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
                + sizeof(ulong64);
            ha->cmd_cnt = 0;
            gdth_copy_command(hanum);
            /* print the board location prefix for the upcoming message */
            if (ha->type == GDT_EISA)
                printk("[EISA slot %d] ",(ushort)ha->brd_phys);
            else if (ha->type == GDT_ISA)
                printk("[DPMEM 0x%4X] ",(ushort)ha->brd_phys);
            else
                printk("[PCI %d/%d] ",(ushort)(ha->brd_phys>>8),
                       (ushort)((ha->brd_phys>>3)&0x1f));
            gdth_release_event(hanum);
        }

    } else {
        if (ha->type == GDT_PCIMPR &&
            (ha->fw_vers & 0xff) >= 0x1a) {
            /* newer MPR firmware delivers a ready-made event string;
               size==0 marks "string form" for gdth_log_event() */
            ha->dvr.size = 0;
            ha->dvr.eu.async.ionode = hanum;
            ha->dvr.eu.async.status = ha->status;
            /* severity and event_string already set! */
        } else {
            ha->dvr.size = sizeof(ha->dvr.eu.async);
            ha->dvr.eu.async.ionode = hanum;
            ha->dvr.eu.async.service = ha->service;
            ha->dvr.eu.async.status = ha->status;
            ha->dvr.eu.async.info = ha->info;
            *(ulong32 *)ha->dvr.eu.async.scsi_coord = ha->info2;
        }
        gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
        gdth_log_event( &ha->dvr, NULL );

        /* new host drive from expand? */
        if (ha->service == CACHESERVICE && ha->status == 56) {
            TRACE2(("gdth_async_event(): new host drive %d created\n",
                    (ushort)ha->info));
            /* gdth_analyse_hdrive(hanum, (ushort)ha->info); */
        }
    }
    return 1;
}
/* Format and emit an event record.
 * dvr    - event data; dvr->size == 0 means the firmware already supplied a
 *          printable event_string, otherwise the cache-service format table
 *          async_cache_tab is used.
 * buffer - if NULL the message goes to the kernel log via printk(), else it
 *          is sprintf()ed into the caller's buffer.
 *
 * The async_cache_tab entries are self-describing: byte 0 is the header
 * length, then (offset,size) pairs locating each printf argument inside
 * dvr->eu.stream, followed by the printf format string itself.  The decoded
 * arguments are packed into a gdth_stackframe and the whole struct is passed
 * to printk/sprintf by value - a non-portable varargs trick inherited from
 * the original driver; do not "clean up" without understanding it.
 */
static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
{
    gdth_stackframe stack;
    char *f = NULL;
    int i,j;

    TRACE2(("gdth_log_event()\n"));
    if (dvr->size == 0) {
        /* pre-formatted string from firmware */
        if (buffer == NULL) {
            printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string);
        } else {
            sprintf(buffer,"Adapter %d: %s\n",
                dvr->eu.async.ionode,dvr->event_string);
        }
    } else if (dvr->eu.async.service == CACHESERVICE &&
        INDEX_OK(dvr->eu.async.status, async_cache_tab)) {
        TRACE2(("GDT: Async. event cache service, event no.: %d\n",
            dvr->eu.async.status));

        f = async_cache_tab[dvr->eu.async.status];

        /* i: parameter to push, j: stack element to fill */
        for (j=0,i=1; i < f[0]; i+=2) {
            switch (f[i+1]) {
              case 4:
                stack.b[j++] = *(ulong32*)&dvr->eu.stream[(int)f[i]];
                break;
              case 2:
                stack.b[j++] = *(ushort*)&dvr->eu.stream[(int)f[i]];
                break;
              case 1:
                stack.b[j++] = *(unchar*)&dvr->eu.stream[(int)f[i]];
                break;
              default:
                break;
            }
        }

        /* format string lives right after the descriptor header at f[f[0]] */
        if (buffer == NULL) {
            printk(&f[(int)f[0]],stack);
            printk("\n");
        } else {
            sprintf(buffer,&f[(int)f[0]],stack);
        }

    } else {
        if (buffer == NULL) {
            printk("GDT HA %u, Unknown async. event service %d event no. %d\n",
                dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
        } else {
            sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. %d",
                dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
        }
    }
}
#ifdef GDTH_STATISTICS
/* Periodic (30 s) statistics timer: counts in-flight commands and queued
 * requests for adapter 0 only and traces the interrupt/IO counters.
 * 'data' (the timer cookie) is unused.
 * NOTE(review): only hanum 0 is sampled - presumably a debug-build
 * simplification rather than an oversight; confirm before extending.
 */
static void gdth_timeout(ulong data)
{
    ulong32 i;
    Scsi_Cmnd *nscp;
    gdth_ha_str *ha;
    ulong flags;
    int hanum = 0;

    ha = HADATA(gdth_ctr_tab[hanum]);
    spin_lock_irqsave(&ha->smp_lock, flags);

    /* commands currently handed to the controller */
    for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
        if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
            ++act_stats;

    /* requests still waiting in the driver's internal queue */
    for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
        ++act_rq;

    TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
            act_ints, act_ios, act_stats, act_rq));
    act_ints = act_ios = 0;

    /* re-arm ourselves */
    gdth_timer.expires = jiffies + 30 * HZ;
    add_timer(&gdth_timer);
    spin_unlock_irqrestore(&ha->smp_lock, flags);
}
#endif
/* Parse driver options from the boot command line / module parameters.
 * ints - legacy numeric list: ints[0] is the count, ints[1..] are IRQ
 *        numbers copied into irq[] (clamped to MAXHA entries).
 * str  - "name:value,name:value,..." option string; value may be a number
 *        or y/Y (1) / n/N (0).  reserve_list: takes its own comma-separated
 *        sub-list of up to MAX_RES_ARGS numbers.
 */
static void __init internal_setup(char *str,int *ints)
{
    int i, argc;
    char *cur_str, *argv;

    TRACE2(("internal_setup() str %s ints[0] %d\n",
            str ? str:"NULL", ints ? ints[0]:0));

    /* read irq[] from ints[] */
    if (ints) {
        argc = ints[0];
        if (argc > 0) {
            if (argc > MAXHA)
                argc = MAXHA;
            for (i = 0; i < argc; ++i)
                irq[i] = ints[i+1];
        }
    }

    /* analyse string */
    argv = str;
    while (argv && (cur_str = strchr(argv, ':'))) {
        /* cur_str now points at the first char of the value */
        int val = 0, c = *++cur_str;

        if (c == 'n' || c == 'N')
            val = 0;
        else if (c == 'y' || c == 'Y')
            val = 1;
        else
            val = (int)simple_strtoul(cur_str, NULL, 0);

        if (!strncmp(argv, "disable:", 8))
            disable = val;
        else if (!strncmp(argv, "reserve_mode:", 13))
            reserve_mode = val;
        else if (!strncmp(argv, "reverse_scan:", 13))
            reverse_scan = val;
        else if (!strncmp(argv, "hdr_channel:", 12))
            hdr_channel = val;
        else if (!strncmp(argv, "max_ids:", 8))
            max_ids = val;
        else if (!strncmp(argv, "rescan:", 7))
            rescan = val;
        else if (!strncmp(argv, "virt_ctr:", 9))
            virt_ctr = val;
        else if (!strncmp(argv, "shared_access:", 14))
            shared_access = val;
        else if (!strncmp(argv, "probe_eisa_isa:", 15))
            probe_eisa_isa = val;
        else if (!strncmp(argv, "reserve_list:", 13)) {
            /* consume the comma-separated numeric sub-list in place */
            reserve_list[0] = val;
            for (i = 1; i < MAX_RES_ARGS; i++) {
                cur_str = strchr(cur_str, ',');
                if (!cur_str)
                    break;
                if (!isdigit((int)*++cur_str)) {
                    /* non-digit after the comma: it starts the next
                       option, back up so the outer loop sees it */
                    --cur_str;
                    break;
                }
                reserve_list[i] =
                    (int)simple_strtoul(cur_str, NULL, 0);
            }
            if (!cur_str)
                break;
            argv = ++cur_str;
            continue;
        }

        /* advance to the option after the next comma */
        if ((argv = strchr(argv, ',')))
            ++argv;
    }
}
4274 int __init option_setup(char *str)
4276 int ints[MAXHA];
4277 char *cur = str;
4278 int i = 1;
4280 TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
4282 while (cur && isdigit(*cur) && i <= MAXHA) {
4283 ints[i++] = simple_strtoul(cur, NULL, 0);
4284 if ((cur = strchr(cur, ',')) != NULL) cur++;
4287 ints[0] = i - 1;
4288 internal_setup(cur, ints);
4289 return 1;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int __init gdth_detect(struct scsi_host_template *shtp)
#else
static int __init gdth_detect(Scsi_Host_Template *shtp)
#endif
/* Probe and initialize all GDT controllers: optionally ISA and EISA
 * (only when probe_eisa_isa is set), then PCI.  For each adapter found
 * it registers a Scsi_Host, allocates the scratch/message DMA buffers,
 * scans for drives, and optionally registers one extra virtual host per
 * physical SCSI channel (virt_ctr mode).  On the first successful
 * controller it also starts the statistics timer, registers the "gdth"
 * character device and the reboot notifier.
 * Returns the number of registered (virtual) hosts.
 */
{
    struct Scsi_Host *shp;
    gdth_pci_str pcistr[MAXHA];
    gdth_ha_str *ha;
    ulong32 isa_bios;
    ushort eisa_slot;
    int i,hanum,cnt,ctr,err;
    unchar b;

#ifdef DEBUG_GDTH
    printk("GDT: This driver contains debugging information !! Trace level = %d\n",
        DebugState);
    printk(" Destination of debugging information: ");
#ifdef __SERIAL__
#ifdef __COM2__
    printk("Serial port COM2\n");
#else
    printk("Serial port COM1\n");
#endif
#else
    printk("Console\n");
#endif
    gdth_delay(3000);
#endif

    TRACE(("gdth_detect()\n"));

    if (disable) {
        printk("GDT-HA: Controller driver disabled from command line !\n");
        return 0;
    }

    printk("GDT-HA: Storage RAID Controller Driver. Version: %s\n",GDTH_VERSION_STR);
    /* initializations */
    gdth_polling = TRUE; b = 0;
    gdth_clear_events();

    /* As default we do not probe for EISA or ISA controllers */
    if (probe_eisa_isa) {
        /* scanning for controllers, at first: ISA controller */
        for (isa_bios=0xc8000UL; isa_bios<=0xd8000UL; isa_bios+=0x8000UL) {
            dma_addr_t scratch_dma_handle;
            scratch_dma_handle = 0;

            if (gdth_ctr_count >= MAXHA)
                break;
            if (gdth_search_isa(isa_bios)) { /* controller found */
                shp = scsi_register(shtp,sizeof(gdth_ext_str));
                if (shp == NULL)
                    continue;

                ha = HADATA(shp);
                if (!gdth_init_isa(isa_bios,ha)) {
                    scsi_unregister(shp);
                    continue;
                }
#ifdef __ia64__
                break;
#else
                /* controller found and initialized */
                printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
                       isa_bios,ha->irq,ha->drq);

                if (request_irq(ha->irq,gdth_interrupt,IRQF_DISABLED,"gdth",ha)) {
                    printk("GDT-ISA: Unable to allocate IRQ\n");
                    scsi_unregister(shp);
                    continue;
                }
                if (request_dma(ha->drq,"gdth")) {
                    printk("GDT-ISA: Unable to allocate DMA channel\n");
                    free_irq(ha->irq,ha);
                    scsi_unregister(shp);
                    continue;
                }
                set_dma_mode(ha->drq,DMA_MODE_CASCADE);
                enable_dma(ha->drq);
                shp->unchecked_isa_dma = 1;
                shp->irq = ha->irq;
                shp->dma_channel = ha->drq;
                hanum = gdth_ctr_count;
                gdth_ctr_tab[gdth_ctr_count++] = shp;
                gdth_ctr_vtab[gdth_ctr_vcount++] = shp;

                NUMDATA(shp)->hanum = (ushort)hanum;
                NUMDATA(shp)->busnum= 0;

                ha->pccb = CMDDATA(shp);
                ha->ccb_phys = 0L;
                /* ha->pdev is NULL here: pci_alloc_consistent() with a
                   NULL device allocates ISA-reachable DMA memory */
                ha->pdev = NULL;
                ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
                                                    &scratch_dma_handle);
                ha->scratch_phys = scratch_dma_handle;
                ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
                                                &scratch_dma_handle);
                ha->msg_phys = scratch_dma_handle;
#ifdef INT_COAL
                ha->coal_stat = (gdth_coal_status *)
                    pci_alloc_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                         MAXOFFSETS, &scratch_dma_handle);
                ha->coal_stat_phys = scratch_dma_handle;
#endif

                ha->scratch_busy = FALSE;
                ha->req_first = NULL;
                ha->tid_cnt = MAX_HDRIVES;
                if (max_ids > 0 && max_ids < ha->tid_cnt)
                    ha->tid_cnt = max_ids;
                for (i=0; i<GDTH_MAXCMDS; ++i)
                    ha->cmd_tab[i].cmnd = UNUSED_CMND;
                ha->scan_mode = rescan ? 0x10 : 0;

                if (ha->pscratch == NULL || ha->pmsg == NULL ||
                    !gdth_search_drives(hanum)) {
                    printk("GDT-ISA: Error during device scan\n");
                    --gdth_ctr_count;
                    --gdth_ctr_vcount;

#ifdef INT_COAL
                    if (ha->coal_stat)
                        pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                            MAXOFFSETS, ha->coal_stat,
                                            ha->coal_stat_phys);
#endif
                    if (ha->pscratch)
                        pci_free_consistent(ha->pdev, GDTH_SCRATCH,
                                            ha->pscratch, ha->scratch_phys);
                    if (ha->pmsg)
                        pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
                                            ha->pmsg, ha->msg_phys);

                    free_irq(ha->irq,ha);
                    scsi_unregister(shp);
                    continue;
                }
                if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
                    hdr_channel = ha->bus_cnt;
                ha->virt_bus = hdr_channel;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) && \
    LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
                shp->highmem_io = 0;
#endif
                /* 16-byte CDBs only if all three services are 64-bit */
                if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT)
                    shp->max_cmd_len = 16;

                shp->max_id = ha->tid_cnt;
                shp->max_lun = MAXLUN;
                shp->max_channel = virt_ctr ? 0 : ha->bus_cnt;
                if (virt_ctr) {
                    virt_ctr = 1;
                    /* register addit. SCSI channels as virtual controllers */
                    for (b = 1; b < ha->bus_cnt + 1; ++b) {
                        shp = scsi_register(shtp,sizeof(gdth_num_str));
                        shp->unchecked_isa_dma = 1;
                        shp->irq = ha->irq;
                        shp->dma_channel = ha->drq;
                        gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
                        NUMDATA(shp)->hanum = (ushort)hanum;
                        NUMDATA(shp)->busnum = b;
                    }
                }

                spin_lock_init(&ha->smp_lock);
                gdth_enable_int(hanum);
#endif /* !__ia64__ */
            }
        }

        /* scanning for EISA controllers */
        for (eisa_slot=0x1000; eisa_slot<=0x8000; eisa_slot+=0x1000) {
            dma_addr_t scratch_dma_handle;
            scratch_dma_handle = 0;

            if (gdth_ctr_count >= MAXHA)
                break;
            if (gdth_search_eisa(eisa_slot)) { /* controller found */
                shp = scsi_register(shtp,sizeof(gdth_ext_str));
                if (shp == NULL)
                    continue;

                ha = HADATA(shp);
                if (!gdth_init_eisa(eisa_slot,ha)) {
                    scsi_unregister(shp);
                    continue;
                }
                /* controller found and initialized */
                printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
                       eisa_slot>>12,ha->irq);

                if (request_irq(ha->irq,gdth_interrupt,IRQF_DISABLED,"gdth",ha)) {
                    printk("GDT-EISA: Unable to allocate IRQ\n");
                    scsi_unregister(shp);
                    continue;
                }
                shp->unchecked_isa_dma = 0;
                shp->irq = ha->irq;
                shp->dma_channel = 0xff;
                hanum = gdth_ctr_count;
                gdth_ctr_tab[gdth_ctr_count++] = shp;
                gdth_ctr_vtab[gdth_ctr_vcount++] = shp;

                NUMDATA(shp)->hanum = (ushort)hanum;
                NUMDATA(shp)->busnum= 0;
                TRACE2(("EISA detect Bus 0: hanum %d\n",
                        NUMDATA(shp)->hanum));

                ha->pccb = CMDDATA(shp);
                ha->ccb_phys = 0L;

                ha->pdev = NULL;
                ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
                                                    &scratch_dma_handle);
                ha->scratch_phys = scratch_dma_handle;
                ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
                                                &scratch_dma_handle);
                ha->msg_phys = scratch_dma_handle;
#ifdef INT_COAL
                ha->coal_stat = (gdth_coal_status *)
                    pci_alloc_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                         MAXOFFSETS, &scratch_dma_handle);
                ha->coal_stat_phys = scratch_dma_handle;
#endif
                /* EISA boards DMA the command block directly, so map it */
                ha->ccb_phys =
                    pci_map_single(ha->pdev,ha->pccb,
                                   sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL);
                ha->scratch_busy = FALSE;
                ha->req_first = NULL;
                ha->tid_cnt = MAX_HDRIVES;
                if (max_ids > 0 && max_ids < ha->tid_cnt)
                    ha->tid_cnt = max_ids;
                for (i=0; i<GDTH_MAXCMDS; ++i)
                    ha->cmd_tab[i].cmnd = UNUSED_CMND;
                ha->scan_mode = rescan ? 0x10 : 0;

                if (ha->pscratch == NULL || ha->pmsg == NULL ||
                    !gdth_search_drives(hanum)) {
                    printk("GDT-EISA: Error during device scan\n");
                    --gdth_ctr_count;
                    --gdth_ctr_vcount;
#ifdef INT_COAL
                    if (ha->coal_stat)
                        pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                            MAXOFFSETS, ha->coal_stat,
                                            ha->coal_stat_phys);
#endif
                    if (ha->pscratch)
                        pci_free_consistent(ha->pdev, GDTH_SCRATCH,
                                            ha->pscratch, ha->scratch_phys);
                    if (ha->pmsg)
                        pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
                                            ha->pmsg, ha->msg_phys);
                    if (ha->ccb_phys)
                        pci_unmap_single(ha->pdev,ha->ccb_phys,
                                         sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL);
                    free_irq(ha->irq,ha);
                    scsi_unregister(shp);
                    continue;
                }
                if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
                    hdr_channel = ha->bus_cnt;
                ha->virt_bus = hdr_channel;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) && \
    LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
                shp->highmem_io = 0;
#endif
                if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT)
                    shp->max_cmd_len = 16;

                shp->max_id = ha->tid_cnt;
                shp->max_lun = MAXLUN;
                shp->max_channel = virt_ctr ? 0 : ha->bus_cnt;
                if (virt_ctr) {
                    virt_ctr = 1;
                    /* register addit. SCSI channels as virtual controllers */
                    for (b = 1; b < ha->bus_cnt + 1; ++b) {
                        shp = scsi_register(shtp,sizeof(gdth_num_str));
                        shp->unchecked_isa_dma = 0;
                        shp->irq = ha->irq;
                        shp->dma_channel = 0xff;
                        gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
                        NUMDATA(shp)->hanum = (ushort)hanum;
                        NUMDATA(shp)->busnum = b;
                    }
                }

                spin_lock_init(&ha->smp_lock);
                gdth_enable_int(hanum);
            }
        }
    }

    /* scanning for PCI controllers */
    cnt = gdth_search_pci(pcistr);
    printk("GDT-HA: Found %d PCI Storage RAID Controllers\n",cnt);
    gdth_sort_pci(pcistr,cnt);
    for (ctr = 0; ctr < cnt; ++ctr) {
        dma_addr_t scratch_dma_handle;
        scratch_dma_handle = 0;

        if (gdth_ctr_count >= MAXHA)
            break;
        shp = scsi_register(shtp,sizeof(gdth_ext_str));
        if (shp == NULL)
            continue;

        ha = HADATA(shp);
        if (!gdth_init_pci(&pcistr[ctr],ha)) {
            scsi_unregister(shp);
            continue;
        }
        /* controller found and initialized */
        printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
               pcistr[ctr].bus,PCI_SLOT(pcistr[ctr].device_fn),ha->irq);

        if (request_irq(ha->irq, gdth_interrupt,
                        IRQF_DISABLED|IRQF_SHARED, "gdth", ha))
        {
            printk("GDT-PCI: Unable to allocate IRQ\n");
            scsi_unregister(shp);
            continue;
        }
        shp->unchecked_isa_dma = 0;
        shp->irq = ha->irq;
        shp->dma_channel = 0xff;
        hanum = gdth_ctr_count;
        gdth_ctr_tab[gdth_ctr_count++] = shp;
        gdth_ctr_vtab[gdth_ctr_vcount++] = shp;

        NUMDATA(shp)->hanum = (ushort)hanum;
        NUMDATA(shp)->busnum= 0;

        ha->pccb = CMDDATA(shp);
        ha->ccb_phys = 0L;

        ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
                                            &scratch_dma_handle);
        ha->scratch_phys = scratch_dma_handle;
        ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
                                        &scratch_dma_handle);
        ha->msg_phys = scratch_dma_handle;
#ifdef INT_COAL
        ha->coal_stat = (gdth_coal_status *)
            pci_alloc_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                 MAXOFFSETS, &scratch_dma_handle);
        ha->coal_stat_phys = scratch_dma_handle;
#endif
        ha->scratch_busy = FALSE;
        ha->req_first = NULL;
        /* device IDs >= 0x200 are the newer boards with the wider ID space */
        ha->tid_cnt = pcistr[ctr].device_id >= 0x200 ? MAXID : MAX_HDRIVES;
        if (max_ids > 0 && max_ids < ha->tid_cnt)
            ha->tid_cnt = max_ids;
        for (i=0; i<GDTH_MAXCMDS; ++i)
            ha->cmd_tab[i].cmnd = UNUSED_CMND;
        ha->scan_mode = rescan ? 0x10 : 0;

        err = FALSE;
        if (ha->pscratch == NULL || ha->pmsg == NULL ||
            !gdth_search_drives(hanum)) {
            err = TRUE;
        } else {
            if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
                hdr_channel = ha->bus_cnt;
            ha->virt_bus = hdr_channel;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
            scsi_set_pci_device(shp, pcistr[ctr].pdev);
#endif
            if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat &GDT_64BIT)||
                /* 64-bit DMA only supported from FW >= x.43 */
                (!ha->dma64_support)) {
                if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) {
                    printk(KERN_WARNING "GDT-PCI %d: Unable to set 32-bit DMA\n", hanum);
                    err = TRUE;
                }
            } else {
                shp->max_cmd_len = 16;
                if (!pci_set_dma_mask(pcistr[ctr].pdev, DMA_64BIT_MASK)) {
                    printk("GDT-PCI %d: 64-bit DMA enabled\n", hanum);
                } else if (pci_set_dma_mask(pcistr[ctr].pdev, DMA_32BIT_MASK)) {
                    printk(KERN_WARNING "GDT-PCI %d: Unable to set 64/32-bit DMA\n", hanum);
                    err = TRUE;
                }
            }
        }

        if (err) {
            printk("GDT-PCI %d: Error during device scan\n", hanum);
            --gdth_ctr_count;
            --gdth_ctr_vcount;
#ifdef INT_COAL
            if (ha->coal_stat)
                pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                    MAXOFFSETS, ha->coal_stat,
                                    ha->coal_stat_phys);
#endif
            if (ha->pscratch)
                pci_free_consistent(ha->pdev, GDTH_SCRATCH,
                                    ha->pscratch, ha->scratch_phys);
            if (ha->pmsg)
                pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
                                    ha->pmsg, ha->msg_phys);
            free_irq(ha->irq,ha);
            scsi_unregister(shp);
            continue;
        }

        shp->max_id = ha->tid_cnt;
        shp->max_lun = MAXLUN;
        shp->max_channel = virt_ctr ? 0 : ha->bus_cnt;
        if (virt_ctr) {
            virt_ctr = 1;
            /* register addit. SCSI channels as virtual controllers */
            for (b = 1; b < ha->bus_cnt + 1; ++b) {
                shp = scsi_register(shtp,sizeof(gdth_num_str));
                shp->unchecked_isa_dma = 0;
                shp->irq = ha->irq;
                shp->dma_channel = 0xff;
                gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
                NUMDATA(shp)->hanum = (ushort)hanum;
                NUMDATA(shp)->busnum = b;
            }
        }

        spin_lock_init(&ha->smp_lock);
        gdth_enable_int(hanum);
    }

    TRACE2(("gdth_detect() %d controller detected\n",gdth_ctr_count));
    if (gdth_ctr_count > 0) {
#ifdef GDTH_STATISTICS
        TRACE2(("gdth_detect(): Initializing timer !\n"));
        init_timer(&gdth_timer);
        gdth_timer.expires = jiffies + HZ;
        gdth_timer.data = 0L;
        gdth_timer.function = gdth_timeout;
        add_timer(&gdth_timer);
#endif
        major = register_chrdev(0,"gdth",&gdth_fops);
        notifier_disabled = 0;
        register_reboot_notifier(&gdth_notifier);
    }
    gdth_polling = FALSE;
    return gdth_ctr_vcount;
}
/* Release one registered Scsi_Host.  Full teardown (flush, IRQ/DMA
 * release, DMA buffer free) happens only for busnum 0, the physical
 * controller; virtual channel hosts just get unregistered.  Once the
 * last physical controller is released, the char device, reboot
 * notifier and (in statistics builds) the timer go away too.
 */
static int gdth_release(struct Scsi_Host *shp)
{
    int hanum;
    gdth_ha_str *ha;

    TRACE2(("gdth_release()\n"));
    if (NUMDATA(shp)->busnum == 0) {
        hanum = NUMDATA(shp)->hanum;
        ha = HADATA(gdth_ctr_tab[hanum]);
        if (ha->sdev) {
            scsi_free_host_dev(ha->sdev);
            ha->sdev = NULL;
        }
        /* write back cached data before the controller disappears */
        gdth_flush(hanum);

        if (shp->irq) {
            free_irq(shp->irq,ha);
        }
#ifndef __ia64__
        if (shp->dma_channel != 0xff) {
            free_dma(shp->dma_channel);
        }
#endif
#ifdef INT_COAL
        if (ha->coal_stat)
            pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
                                MAXOFFSETS, ha->coal_stat, ha->coal_stat_phys);
#endif
        if (ha->pscratch)
            pci_free_consistent(ha->pdev, GDTH_SCRATCH,
                                ha->pscratch, ha->scratch_phys);
        if (ha->pmsg)
            pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
                                ha->pmsg, ha->msg_phys);
        /* ccb_phys is only non-zero for EISA boards (mapped in detect) */
        if (ha->ccb_phys)
            pci_unmap_single(ha->pdev,ha->ccb_phys,
                             sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL);
        gdth_ctr_released++;
        TRACE2(("gdth_release(): HA %d of %d\n",
                gdth_ctr_released, gdth_ctr_count));

        if (gdth_ctr_released == gdth_ctr_count) {
#ifdef GDTH_STATISTICS
            del_timer(&gdth_timer);
#endif
            unregister_chrdev(major,"gdth");
            unregister_reboot_notifier(&gdth_notifier);
        }
    }

    scsi_unregister(shp);
    return 0;
}
4800 static const char *gdth_ctr_name(int hanum)
4802 gdth_ha_str *ha;
4804 TRACE2(("gdth_ctr_name()\n"));
4806 ha = HADATA(gdth_ctr_tab[hanum]);
4808 if (ha->type == GDT_EISA) {
4809 switch (ha->stype) {
4810 case GDT3_ID:
4811 return("GDT3000/3020");
4812 case GDT3A_ID:
4813 return("GDT3000A/3020A/3050A");
4814 case GDT3B_ID:
4815 return("GDT3000B/3010A");
4817 } else if (ha->type == GDT_ISA) {
4818 return("GDT2000/2020");
4819 } else if (ha->type == GDT_PCI) {
4820 switch (ha->stype) {
4821 case PCI_DEVICE_ID_VORTEX_GDT60x0:
4822 return("GDT6000/6020/6050");
4823 case PCI_DEVICE_ID_VORTEX_GDT6000B:
4824 return("GDT6000B/6010");
4827 /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */
4829 return("");
4832 static const char *gdth_info(struct Scsi_Host *shp)
4834 int hanum;
4835 gdth_ha_str *ha;
4837 TRACE2(("gdth_info()\n"));
4838 hanum = NUMDATA(shp)->hanum;
4839 ha = HADATA(gdth_ctr_tab[hanum]);
4841 return ((const char *)ha->binfo.type_string);
/* SCSI error-handler bus reset.  Drops all queued commands for the
 * affected bus, then either releases cluster reservations on every host
 * drive (when the reset targets the virtual host-drive bus) or resets
 * the corresponding physical SCSI bus via the raw service.
 * Always returns SUCCESS.
 */
static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
{
    int i, hanum;
    gdth_ha_str *ha;
    ulong flags;
    Scsi_Cmnd *cmnd;
    unchar b;

    TRACE2(("gdth_eh_bus_reset()\n"));

    hanum = NUMDATA(scp->device->host)->hanum;
    /* in virt_ctr mode each channel is its own host, so the bus number
       comes from the host's busnum instead of the command's channel */
    b = virt_ctr ? NUMDATA(scp->device->host)->busnum : scp->device->channel;
    ha = HADATA(gdth_ctr_tab[hanum]);

    /* clear command tab */
    spin_lock_irqsave(&ha->smp_lock, flags);
    for (i = 0; i < GDTH_MAXCMDS; ++i) {
        cmnd = ha->cmd_tab[i].cmnd;
        if (!SPECIAL_SCP(cmnd) && cmnd->device->channel == b)
            ha->cmd_tab[i].cmnd = UNUSED_CMND;
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);

    if (b == ha->virt_bus) {
        /* host drives */
        for (i = 0; i < MAX_HDRIVES; ++i) {
            if (ha->hdr[i].present) {
                spin_lock_irqsave(&ha->smp_lock, flags);
                /* internal command runs in polled mode under the lock */
                gdth_polling = TRUE;
                while (gdth_test_busy(hanum))
                    gdth_delay(0);
                if (gdth_internal_cmd(hanum, CACHESERVICE,
                                      GDT_CLUST_RESET, i, 0, 0))
                    ha->hdr[i].cluster_type &= ~CLUSTER_RESERVED;
                gdth_polling = FALSE;
                spin_unlock_irqrestore(&ha->smp_lock, flags);
            }
        }
    } else {
        /* raw devices */
        spin_lock_irqsave(&ha->smp_lock, flags);
        for (i = 0; i < MAXID; ++i)
            ha->raw[BUS_L2P(ha,b)].io_cnt[i] = 0;
        gdth_polling = TRUE;
        while (gdth_test_busy(hanum))
            gdth_delay(0);
        gdth_internal_cmd(hanum, SCSIRAWSERVICE, GDT_RESET_BUS,
                          BUS_L2P(ha,b), 0, 0);
        gdth_polling = FALSE;
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    }
    return SUCCESS;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
#else
static int gdth_bios_param(Disk *disk,kdev_t dev,int *ip)
#endif
/* Report a BIOS-compatible disk geometry in ip[]: heads, sectors,
 * cylinders.  Host drives with firmware-supplied mapping use it
 * directly; raw devices (and unmapped host drives) get a geometry
 * derived from the capacity via gdth_eval_mapping().
 * Always returns 0.
 */
{
    unchar b, t;
    int hanum;
    gdth_ha_str *ha;
    struct scsi_device *sd;
    unsigned capacity;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    sd = sdev;
    capacity = cap;
#else
    sd = disk->device;
    capacity = disk->capacity;
#endif
    hanum = NUMDATA(sd->host)->hanum;
    b = virt_ctr ? NUMDATA(sd->host)->busnum : sd->channel;
    t = sd->id;
    TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", hanum, b, t));
    ha = HADATA(gdth_ctr_tab[hanum]);

    if (b != ha->virt_bus || ha->hdr[t].heads == 0) {
        /* raw device or host drive without mapping information */
        TRACE2(("Evaluate mapping\n"));
        gdth_eval_mapping(capacity,&ip[2],&ip[0],&ip[1]);
    } else {
        ip[0] = ha->hdr[t].heads;
        ip[1] = ha->hdr[t].secs;
        ip[2] = capacity / ip[0] / ip[1];
    }

    TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n",
            ip[0],ip[1],ip[2]));
    return 0;
}
/* SCSI host template .queuecommand hook: stash the completion callback
 * and the driver's per-command bookkeeping in the SCp scratch area,
 * then push the command onto the internal priority queue and kick the
 * dispatcher.  Always returns 0 (command accepted).
 */
static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *))
{
    int hanum;
    int priority;

    TRACE(("gdth_queuecommand() cmd 0x%x\n", scp->cmnd[0]));

    scp->scsi_done = (void *)done;
    /* SCp fields are repurposed by this driver as per-command state:
       phase/sent_command = -1 (not started), Status = no DMA mapping */
    scp->SCp.have_data_in = 1;
    scp->SCp.phase = -1;
    scp->SCp.sent_command = -1;
    scp->SCp.Status = GDTH_MAP_NONE;
    scp->SCp.buffer = (struct scatterlist *)NULL;

    hanum = NUMDATA(scp->device->host)->hanum;
#ifdef GDTH_STATISTICS
    ++act_ios;
#endif

    priority = DEFAULT_PRI;
    /* internal commands carry their priority in this_residual */
    if (scp->done == gdth_scsi_done)
        priority = scp->SCp.this_residual;
    else
        gdth_update_timeout(hanum, scp, scp->timeout_per_command * 6);

    gdth_putq( hanum, scp, priority );
    gdth_next( hanum );
    return 0;
}
4970 static int gdth_open(struct inode *inode, struct file *filep)
4972 gdth_ha_str *ha;
4973 int i;
4975 for (i = 0; i < gdth_ctr_count; i++) {
4976 ha = HADATA(gdth_ctr_tab[i]);
4977 if (!ha->sdev)
4978 ha->sdev = scsi_get_host_dev(gdth_ctr_tab[i]);
4981 TRACE(("gdth_open()\n"));
4982 return 0;
/* Char-device release: no per-open state to tear down. */
static int gdth_close(struct inode *inode, struct file *filep)
{
    TRACE(("gdth_close()\n"));

    return 0;
}
/* GDTIOCTL event ioctl.  The 'erase' field selects the operation:
 *   0xff - store the supplied event in the driver event buffer
 *   0xfe - clear all events
 *   0    - read the event at 'handle' (handle advanced on return)
 *   else - read-and-mark for application 'erase'
 * The (possibly updated) structure is copied back to user space.
 * Returns 0 on success, -EFAULT on copy or validation failure.
 */
static int ioc_event(void __user *arg)
{
    gdth_ioctl_event evt;
    gdth_ha_str *ha;
    ulong flags;

    if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)) ||
        evt.ionode >= gdth_ctr_count)
        return -EFAULT;
    ha = HADATA(gdth_ctr_tab[evt.ionode]);

    if (evt.erase == 0xff) {
        /* size depends on which union member the source selects */
        if (evt.event.event_source == ES_TEST)
            evt.event.event_data.size=sizeof(evt.event.event_data.eu.test);
        else if (evt.event.event_source == ES_DRIVER)
            evt.event.event_data.size=sizeof(evt.event.event_data.eu.driver);
        else if (evt.event.event_source == ES_SYNC)
            evt.event.event_data.size=sizeof(evt.event.event_data.eu.sync);
        else
            evt.event.event_data.size=sizeof(evt.event.event_data.eu.async);
        spin_lock_irqsave(&ha->smp_lock, flags);
        gdth_store_event(ha, evt.event.event_source, evt.event.event_idx,
                         &evt.event.event_data);
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    } else if (evt.erase == 0xfe) {
        gdth_clear_events();
    } else if (evt.erase == 0) {
        evt.handle = gdth_read_event(ha, evt.handle, &evt.event);
    } else {
        gdth_readapp_event(ha, evt.erase, &evt.event);
    }
    if (copy_to_user(arg, &evt, sizeof(gdth_ioctl_event)))
        return -EFAULT;
    return 0;
}
/* GDTIOCTL lock/unlock ioctl for a list of host drives.  Locking a
 * drive waits for its outstanding commands and stops its timeouts;
 * unlocking restarts the timeouts and kicks the dispatcher.
 * Invalid or absent drive numbers in the list are silently skipped.
 * Returns 0 on success, -EFAULT on copy or validation failure.
 */
static int ioc_lockdrv(void __user *arg)
{
    gdth_ioctl_lockdrv ldrv;
    unchar i, j;
    ulong flags;
    gdth_ha_str *ha;

    if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)) ||
        ldrv.ionode >= gdth_ctr_count)
        return -EFAULT;
    ha = HADATA(gdth_ctr_tab[ldrv.ionode]);

    for (i = 0; i < ldrv.drive_cnt && i < MAX_HDRIVES; ++i) {
        j = ldrv.drives[i];
        if (j >= MAX_HDRIVES || !ha->hdr[j].present)
            continue;
        if (ldrv.lock) {
            /* set the flag under the lock, then drain outside it -
               gdth_wait_completion() takes the lock itself */
            spin_lock_irqsave(&ha->smp_lock, flags);
            ha->hdr[j].lock = 1;
            spin_unlock_irqrestore(&ha->smp_lock, flags);
            gdth_wait_completion(ldrv.ionode, ha->bus_cnt, j);
            gdth_stop_timeout(ldrv.ionode, ha->bus_cnt, j);
        } else {
            spin_lock_irqsave(&ha->smp_lock, flags);
            ha->hdr[j].lock = 0;
            spin_unlock_irqrestore(&ha->smp_lock, flags);
            gdth_start_timeout(ldrv.ionode, ha->bus_cnt, j);
            gdth_next(ldrv.ionode);
        }
    }
    return 0;
}
5060 static int ioc_resetdrv(void __user *arg, char *cmnd)
5062 gdth_ioctl_reset res;
5063 gdth_cmd_str cmd;
5064 int hanum;
5065 gdth_ha_str *ha;
5066 int rval;
5068 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
5069 res.ionode >= gdth_ctr_count || res.number >= MAX_HDRIVES)
5070 return -EFAULT;
5071 hanum = res.ionode;
5072 ha = HADATA(gdth_ctr_tab[hanum]);
5074 if (!ha->hdr[res.number].present)
5075 return 0;
5076 memset(&cmd, 0, sizeof(gdth_cmd_str));
5077 cmd.Service = CACHESERVICE;
5078 cmd.OpCode = GDT_CLUST_RESET;
5079 if (ha->cache_feat & GDT_64BIT)
5080 cmd.u.cache64.DeviceNo = res.number;
5081 else
5082 cmd.u.cache.DeviceNo = res.number;
5084 rval = __gdth_execute(ha->sdev, &cmd, cmnd, 30, NULL);
5085 if (rval < 0)
5086 return rval;
5087 res.status = rval;
5089 if (copy_to_user(arg, &res, sizeof(gdth_ioctl_reset)))
5090 return -EFAULT;
5091 return 0;
5094 static int ioc_general(void __user *arg, char *cmnd)
5096 gdth_ioctl_general gen;
5097 char *buf = NULL;
5098 ulong64 paddr;
5099 int hanum;
5100 gdth_ha_str *ha;
5101 int rval;
5103 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)) ||
5104 gen.ionode >= gdth_ctr_count)
5105 return -EFAULT;
5106 hanum = gen.ionode;
5107 ha = HADATA(gdth_ctr_tab[hanum]);
5108 if (gen.data_len + gen.sense_len != 0) {
5109 if (!(buf = gdth_ioctl_alloc(hanum, gen.data_len + gen.sense_len,
5110 FALSE, &paddr)))
5111 return -EFAULT;
5112 if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
5113 gen.data_len + gen.sense_len)) {
5114 gdth_ioctl_free(hanum, gen.data_len+gen.sense_len, buf, paddr);
5115 return -EFAULT;
5118 if (gen.command.OpCode == GDT_IOCTL) {
5119 gen.command.u.ioctl.p_param = paddr;
5120 } else if (gen.command.Service == CACHESERVICE) {
5121 if (ha->cache_feat & GDT_64BIT) {
5122 /* copy elements from 32-bit IOCTL structure */
5123 gen.command.u.cache64.BlockCnt = gen.command.u.cache.BlockCnt;
5124 gen.command.u.cache64.BlockNo = gen.command.u.cache.BlockNo;
5125 gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo;
5126 /* addresses */
5127 if (ha->cache_feat & SCATTER_GATHER) {
5128 gen.command.u.cache64.DestAddr = (ulong64)-1;
5129 gen.command.u.cache64.sg_canz = 1;
5130 gen.command.u.cache64.sg_lst[0].sg_ptr = paddr;
5131 gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len;
5132 gen.command.u.cache64.sg_lst[1].sg_len = 0;
5133 } else {
5134 gen.command.u.cache64.DestAddr = paddr;
5135 gen.command.u.cache64.sg_canz = 0;
5137 } else {
5138 if (ha->cache_feat & SCATTER_GATHER) {
5139 gen.command.u.cache.DestAddr = 0xffffffff;
5140 gen.command.u.cache.sg_canz = 1;
5141 gen.command.u.cache.sg_lst[0].sg_ptr = (ulong32)paddr;
5142 gen.command.u.cache.sg_lst[0].sg_len = gen.data_len;
5143 gen.command.u.cache.sg_lst[1].sg_len = 0;
5144 } else {
5145 gen.command.u.cache.DestAddr = paddr;
5146 gen.command.u.cache.sg_canz = 0;
5149 } else if (gen.command.Service == SCSIRAWSERVICE) {
5150 if (ha->raw_feat & GDT_64BIT) {
5151 /* copy elements from 32-bit IOCTL structure */
5152 char cmd[16];
5153 gen.command.u.raw64.sense_len = gen.command.u.raw.sense_len;
5154 gen.command.u.raw64.bus = gen.command.u.raw.bus;
5155 gen.command.u.raw64.lun = gen.command.u.raw.lun;
5156 gen.command.u.raw64.target = gen.command.u.raw.target;
5157 memcpy(cmd, gen.command.u.raw.cmd, 16);
5158 memcpy(gen.command.u.raw64.cmd, cmd, 16);
5159 gen.command.u.raw64.clen = gen.command.u.raw.clen;
5160 gen.command.u.raw64.sdlen = gen.command.u.raw.sdlen;
5161 gen.command.u.raw64.direction = gen.command.u.raw.direction;
5162 /* addresses */
5163 if (ha->raw_feat & SCATTER_GATHER) {
5164 gen.command.u.raw64.sdata = (ulong64)-1;
5165 gen.command.u.raw64.sg_ranz = 1;
5166 gen.command.u.raw64.sg_lst[0].sg_ptr = paddr;
5167 gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len;
5168 gen.command.u.raw64.sg_lst[1].sg_len = 0;
5169 } else {
5170 gen.command.u.raw64.sdata = paddr;
5171 gen.command.u.raw64.sg_ranz = 0;
5173 gen.command.u.raw64.sense_data = paddr + gen.data_len;
5174 } else {
5175 if (ha->raw_feat & SCATTER_GATHER) {
5176 gen.command.u.raw.sdata = 0xffffffff;
5177 gen.command.u.raw.sg_ranz = 1;
5178 gen.command.u.raw.sg_lst[0].sg_ptr = (ulong32)paddr;
5179 gen.command.u.raw.sg_lst[0].sg_len = gen.data_len;
5180 gen.command.u.raw.sg_lst[1].sg_len = 0;
5181 } else {
5182 gen.command.u.raw.sdata = paddr;
5183 gen.command.u.raw.sg_ranz = 0;
5185 gen.command.u.raw.sense_data = (ulong32)paddr + gen.data_len;
5187 } else {
5188 gdth_ioctl_free(hanum, gen.data_len+gen.sense_len, buf, paddr);
5189 return -EFAULT;
5193 rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info);
5194 if (rval < 0)
5195 return rval;
5196 gen.status = rval;
5198 if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
5199 gen.data_len + gen.sense_len)) {
5200 gdth_ioctl_free(hanum, gen.data_len+gen.sense_len, buf, paddr);
5201 return -EFAULT;
5203 if (copy_to_user(arg, &gen,
5204 sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str))) {
5205 gdth_ioctl_free(hanum, gen.data_len+gen.sense_len, buf, paddr);
5206 return -EFAULT;
5208 gdth_ioctl_free(hanum, gen.data_len+gen.sense_len, buf, paddr);
5209 return 0;
5212 static int ioc_hdrlist(void __user *arg, char *cmnd)
5214 gdth_ioctl_rescan *rsc;
5215 gdth_cmd_str *cmd;
5216 gdth_ha_str *ha;
5217 unchar i;
5218 int hanum, rc = -ENOMEM;
5219 u32 cluster_type = 0;
5221 rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
5222 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
5223 if (!rsc || !cmd)
5224 goto free_fail;
5226 if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
5227 rsc->ionode >= gdth_ctr_count) {
5228 rc = -EFAULT;
5229 goto free_fail;
5231 hanum = rsc->ionode;
5232 ha = HADATA(gdth_ctr_tab[hanum]);
5233 memset(cmd, 0, sizeof(gdth_cmd_str));
5235 for (i = 0; i < MAX_HDRIVES; ++i) {
5236 if (!ha->hdr[i].present) {
5237 rsc->hdr_list[i].bus = 0xff;
5238 continue;
5240 rsc->hdr_list[i].bus = ha->virt_bus;
5241 rsc->hdr_list[i].target = i;
5242 rsc->hdr_list[i].lun = 0;
5243 rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
5244 if (ha->hdr[i].cluster_type & CLUSTER_DRIVE) {
5245 cmd->Service = CACHESERVICE;
5246 cmd->OpCode = GDT_CLUST_INFO;
5247 if (ha->cache_feat & GDT_64BIT)
5248 cmd->u.cache64.DeviceNo = i;
5249 else
5250 cmd->u.cache.DeviceNo = i;
5251 if (__gdth_execute(ha->sdev, cmd, cmnd, 30, &cluster_type) == S_OK)
5252 rsc->hdr_list[i].cluster_type = cluster_type;
5256 if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
5257 rc = -EFAULT;
5258 else
5259 rc = 0;
5261 free_fail:
5262 kfree(rsc);
5263 kfree(cmd);
5264 return rc;
5267 static int ioc_rescan(void __user *arg, char *cmnd)
5269 gdth_ioctl_rescan *rsc;
5270 gdth_cmd_str *cmd;
5271 ushort i, status, hdr_cnt;
5272 ulong32 info;
5273 int hanum, cyls, hds, secs;
5274 int rc = -ENOMEM;
5275 ulong flags;
5276 gdth_ha_str *ha;
5278 rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
5279 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
5280 if (!cmd || !rsc)
5281 goto free_fail;
5283 if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
5284 rsc->ionode >= gdth_ctr_count) {
5285 rc = -EFAULT;
5286 goto free_fail;
5288 hanum = rsc->ionode;
5289 ha = HADATA(gdth_ctr_tab[hanum]);
5290 memset(cmd, 0, sizeof(gdth_cmd_str));
5292 if (rsc->flag == 0) {
5293 /* old method: re-init. cache service */
5294 cmd->Service = CACHESERVICE;
5295 if (ha->cache_feat & GDT_64BIT) {
5296 cmd->OpCode = GDT_X_INIT_HOST;
5297 cmd->u.cache64.DeviceNo = LINUX_OS;
5298 } else {
5299 cmd->OpCode = GDT_INIT;
5300 cmd->u.cache.DeviceNo = LINUX_OS;
5303 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
5304 i = 0;
5305 hdr_cnt = (status == S_OK ? (ushort)info : 0);
5306 } else {
5307 i = rsc->hdr_no;
5308 hdr_cnt = i + 1;
5311 for (; i < hdr_cnt && i < MAX_HDRIVES; ++i) {
5312 cmd->Service = CACHESERVICE;
5313 cmd->OpCode = GDT_INFO;
5314 if (ha->cache_feat & GDT_64BIT)
5315 cmd->u.cache64.DeviceNo = i;
5316 else
5317 cmd->u.cache.DeviceNo = i;
5319 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
5321 spin_lock_irqsave(&ha->smp_lock, flags);
5322 rsc->hdr_list[i].bus = ha->virt_bus;
5323 rsc->hdr_list[i].target = i;
5324 rsc->hdr_list[i].lun = 0;
5325 if (status != S_OK) {
5326 ha->hdr[i].present = FALSE;
5327 } else {
5328 ha->hdr[i].present = TRUE;
5329 ha->hdr[i].size = info;
5330 /* evaluate mapping */
5331 ha->hdr[i].size &= ~SECS32;
5332 gdth_eval_mapping(ha->hdr[i].size,&cyls,&hds,&secs);
5333 ha->hdr[i].heads = hds;
5334 ha->hdr[i].secs = secs;
5335 /* round size */
5336 ha->hdr[i].size = cyls * hds * secs;
5338 spin_unlock_irqrestore(&ha->smp_lock, flags);
5339 if (status != S_OK)
5340 continue;
5342 /* extended info, if GDT_64BIT, for drives > 2 TB */
5343 /* but we need ha->info2, not yet stored in scp->SCp */
5345 /* devtype, cluster info, R/W attribs */
5346 cmd->Service = CACHESERVICE;
5347 cmd->OpCode = GDT_DEVTYPE;
5348 if (ha->cache_feat & GDT_64BIT)
5349 cmd->u.cache64.DeviceNo = i;
5350 else
5351 cmd->u.cache.DeviceNo = i;
5353 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
5355 spin_lock_irqsave(&ha->smp_lock, flags);
5356 ha->hdr[i].devtype = (status == S_OK ? (ushort)info : 0);
5357 spin_unlock_irqrestore(&ha->smp_lock, flags);
5359 cmd->Service = CACHESERVICE;
5360 cmd->OpCode = GDT_CLUST_INFO;
5361 if (ha->cache_feat & GDT_64BIT)
5362 cmd->u.cache64.DeviceNo = i;
5363 else
5364 cmd->u.cache.DeviceNo = i;
5366 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
5368 spin_lock_irqsave(&ha->smp_lock, flags);
5369 ha->hdr[i].cluster_type =
5370 ((status == S_OK && !shared_access) ? (ushort)info : 0);
5371 spin_unlock_irqrestore(&ha->smp_lock, flags);
5372 rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
5374 cmd->Service = CACHESERVICE;
5375 cmd->OpCode = GDT_RW_ATTRIBS;
5376 if (ha->cache_feat & GDT_64BIT)
5377 cmd->u.cache64.DeviceNo = i;
5378 else
5379 cmd->u.cache.DeviceNo = i;
5381 status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
5383 spin_lock_irqsave(&ha->smp_lock, flags);
5384 ha->hdr[i].rw_attribs = (status == S_OK ? (ushort)info : 0);
5385 spin_unlock_irqrestore(&ha->smp_lock, flags);
5388 if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
5389 rc = -EFAULT;
5390 else
5391 rc = 0;
5393 free_fail:
5394 kfree(rsc);
5395 kfree(cmd);
5396 return rc;
5399 static int gdth_ioctl(struct inode *inode, struct file *filep,
5400 unsigned int cmd, unsigned long arg)
5402 gdth_ha_str *ha;
5403 Scsi_Cmnd *scp;
5404 ulong flags;
5405 char cmnd[MAX_COMMAND_SIZE];
5406 void __user *argp = (void __user *)arg;
5408 memset(cmnd, 0xff, 12);
5410 TRACE(("gdth_ioctl() cmd 0x%x\n", cmd));
5412 switch (cmd) {
5413 case GDTIOCTL_CTRCNT:
5415 int cnt = gdth_ctr_count;
5416 if (put_user(cnt, (int __user *)argp))
5417 return -EFAULT;
5418 break;
5421 case GDTIOCTL_DRVERS:
5423 int ver = (GDTH_VERSION<<8) | GDTH_SUBVERSION;
5424 if (put_user(ver, (int __user *)argp))
5425 return -EFAULT;
5426 break;
5429 case GDTIOCTL_OSVERS:
5431 gdth_ioctl_osvers osv;
5433 osv.version = (unchar)(LINUX_VERSION_CODE >> 16);
5434 osv.subversion = (unchar)(LINUX_VERSION_CODE >> 8);
5435 osv.revision = (ushort)(LINUX_VERSION_CODE & 0xff);
5436 if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
5437 return -EFAULT;
5438 break;
5441 case GDTIOCTL_CTRTYPE:
5443 gdth_ioctl_ctrtype ctrt;
5445 if (copy_from_user(&ctrt, argp, sizeof(gdth_ioctl_ctrtype)) ||
5446 ctrt.ionode >= gdth_ctr_count)
5447 return -EFAULT;
5448 ha = HADATA(gdth_ctr_tab[ctrt.ionode]);
5449 if (ha->type == GDT_ISA || ha->type == GDT_EISA) {
5450 ctrt.type = (unchar)((ha->stype>>20) - 0x10);
5451 } else {
5452 if (ha->type != GDT_PCIMPR) {
5453 ctrt.type = (unchar)((ha->stype<<4) + 6);
5454 } else {
5455 ctrt.type =
5456 (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
5457 if (ha->stype >= 0x300)
5458 ctrt.ext_type = 0x6000 | ha->subdevice_id;
5459 else
5460 ctrt.ext_type = 0x6000 | ha->stype;
5462 ctrt.device_id = ha->stype;
5463 ctrt.sub_device_id = ha->subdevice_id;
5465 ctrt.info = ha->brd_phys;
5466 ctrt.oem_id = ha->oem_id;
5467 if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype)))
5468 return -EFAULT;
5469 break;
5472 case GDTIOCTL_GENERAL:
5473 return ioc_general(argp, cmnd);
5475 case GDTIOCTL_EVENT:
5476 return ioc_event(argp);
5478 case GDTIOCTL_LOCKDRV:
5479 return ioc_lockdrv(argp);
5481 case GDTIOCTL_LOCKCHN:
5483 gdth_ioctl_lockchn lchn;
5484 unchar i, j;
5486 if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
5487 lchn.ionode >= gdth_ctr_count)
5488 return -EFAULT;
5489 ha = HADATA(gdth_ctr_tab[lchn.ionode]);
5491 i = lchn.channel;
5492 if (i < ha->bus_cnt) {
5493 if (lchn.lock) {
5494 spin_lock_irqsave(&ha->smp_lock, flags);
5495 ha->raw[i].lock = 1;
5496 spin_unlock_irqrestore(&ha->smp_lock, flags);
5497 for (j = 0; j < ha->tid_cnt; ++j) {
5498 gdth_wait_completion(lchn.ionode, i, j);
5499 gdth_stop_timeout(lchn.ionode, i, j);
5501 } else {
5502 spin_lock_irqsave(&ha->smp_lock, flags);
5503 ha->raw[i].lock = 0;
5504 spin_unlock_irqrestore(&ha->smp_lock, flags);
5505 for (j = 0; j < ha->tid_cnt; ++j) {
5506 gdth_start_timeout(lchn.ionode, i, j);
5507 gdth_next(lchn.ionode);
5511 break;
5514 case GDTIOCTL_RESCAN:
5515 return ioc_rescan(argp, cmnd);
5517 case GDTIOCTL_HDRLIST:
5518 return ioc_hdrlist(argp, cmnd);
5520 case GDTIOCTL_RESET_BUS:
5522 gdth_ioctl_reset res;
5523 int hanum, rval;
5525 if (copy_from_user(&res, argp, sizeof(gdth_ioctl_reset)) ||
5526 res.ionode >= gdth_ctr_count)
5527 return -EFAULT;
5528 hanum = res.ionode;
5529 ha = HADATA(gdth_ctr_tab[hanum]);
5531 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
5532 scp = kmalloc(sizeof(*scp), GFP_KERNEL);
5533 if (!scp)
5534 return -ENOMEM;
5535 memset(scp, 0, sizeof(*scp));
5536 scp->device = ha->sdev;
5537 scp->cmd_len = 12;
5538 scp->use_sg = 0;
5539 scp->device->channel = virt_ctr ? 0 : res.number;
5540 rval = gdth_eh_bus_reset(scp);
5541 res.status = (rval == SUCCESS ? S_OK : S_GENERR);
5542 kfree(scp);
5543 #else
5544 scp = scsi_allocate_device(ha->sdev, 1, FALSE);
5545 if (!scp)
5546 return -ENOMEM;
5547 scp->cmd_len = 12;
5548 scp->use_sg = 0;
5549 scp->channel = virt_ctr ? 0 : res.number;
5550 rval = gdth_eh_bus_reset(scp);
5551 res.status = (rval == SUCCESS ? S_OK : S_GENERR);
5552 scsi_release_command(scp);
5553 #endif
5554 if (copy_to_user(argp, &res, sizeof(gdth_ioctl_reset)))
5555 return -EFAULT;
5556 break;
5559 case GDTIOCTL_RESET_DRV:
5560 return ioc_resetdrv(argp, cmnd);
5562 default:
5563 break;
5565 return 0;
5569 /* flush routine */
5570 static void gdth_flush(int hanum)
5572 int i;
5573 gdth_ha_str *ha;
5574 gdth_cmd_str gdtcmd;
5575 char cmnd[MAX_COMMAND_SIZE];
5576 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
5578 TRACE2(("gdth_flush() hanum %d\n",hanum));
5579 ha = HADATA(gdth_ctr_tab[hanum]);
5581 for (i = 0; i < MAX_HDRIVES; ++i) {
5582 if (ha->hdr[i].present) {
5583 gdtcmd.BoardNode = LOCALBOARD;
5584 gdtcmd.Service = CACHESERVICE;
5585 gdtcmd.OpCode = GDT_FLUSH;
5586 if (ha->cache_feat & GDT_64BIT) {
5587 gdtcmd.u.cache64.DeviceNo = i;
5588 gdtcmd.u.cache64.BlockNo = 1;
5589 gdtcmd.u.cache64.sg_canz = 0;
5590 } else {
5591 gdtcmd.u.cache.DeviceNo = i;
5592 gdtcmd.u.cache.BlockNo = 1;
5593 gdtcmd.u.cache.sg_canz = 0;
5595 TRACE2(("gdth_flush(): flush ha %d drive %d\n", hanum, i));
5597 gdth_execute(gdth_ctr_tab[hanum], &gdtcmd, cmnd, 30, NULL);
5602 /* shutdown routine */
5603 static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
5605 int hanum;
5606 #ifndef __alpha__
5607 gdth_cmd_str gdtcmd;
5608 char cmnd[MAX_COMMAND_SIZE];
5609 #endif
5611 if (notifier_disabled)
5612 return NOTIFY_OK;
5614 TRACE2(("gdth_halt() event %d\n",(int)event));
5615 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
5616 return NOTIFY_DONE;
5618 notifier_disabled = 1;
5619 printk("GDT-HA: Flushing all host drives .. ");
5620 for (hanum = 0; hanum < gdth_ctr_count; ++hanum) {
5621 gdth_flush(hanum);
5623 #ifndef __alpha__
5624 /* controller reset */
5625 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
5626 gdtcmd.BoardNode = LOCALBOARD;
5627 gdtcmd.Service = CACHESERVICE;
5628 gdtcmd.OpCode = GDT_RESET;
5629 TRACE2(("gdth_halt(): reset controller %d\n", hanum));
5630 gdth_execute(gdth_ctr_tab[hanum], &gdtcmd, cmnd, 10, NULL);
5631 #endif
5633 printk("Done.\n");
5635 #ifdef GDTH_STATISTICS
5636 del_timer(&gdth_timer);
5637 #endif
5638 return NOTIFY_OK;
5641 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
5642 /* configure lun */
5643 static int gdth_slave_configure(struct scsi_device *sdev)
5645 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
5646 sdev->skip_ms_page_3f = 1;
5647 sdev->skip_ms_page_8 = 1;
5648 return 0;
5650 #endif
5652 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
5653 static struct scsi_host_template driver_template = {
5654 #else
5655 static Scsi_Host_Template driver_template = {
5656 #endif
5657 .proc_name = "gdth",
5658 .proc_info = gdth_proc_info,
5659 .name = "GDT SCSI Disk Array Controller",
5660 .detect = gdth_detect,
5661 .release = gdth_release,
5662 .info = gdth_info,
5663 .queuecommand = gdth_queuecommand,
5664 .eh_bus_reset_handler = gdth_eh_bus_reset,
5665 .bios_param = gdth_bios_param,
5666 .can_queue = GDTH_MAXCMDS,
5667 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
5668 .slave_configure = gdth_slave_configure,
5669 #endif
5670 .this_id = -1,
5671 .sg_tablesize = GDTH_MAXSG,
5672 .cmd_per_lun = GDTH_MAXC_P_L,
5673 .unchecked_isa_dma = 1,
5674 .use_clustering = ENABLE_CLUSTERING,
5675 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
5676 .use_new_eh_code = 1,
5677 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20)
5678 .highmem_io = 1,
5679 #endif
5680 #endif
5683 #include "scsi_module.c"
5684 #ifndef MODULE
5685 __setup("gdth=", option_setup);
5686 #endif