/*
        pd.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
                            Under the terms of the GNU public license.

        This is the high-level driver for parallel port IDE hard
        drives based on chips supported by the paride module.

        By default, the driver will autoprobe for a single parallel
        port IDE drive, but if the drives' individual parameters are
        specified, the driver can handle up to 4 drives.

        The behaviour of the pd driver can be altered by setting
        some parameters from the insmod command line.  The following
        parameters are adjustable:
            drive0      These four arguments can be arrays of
            drive1      1-8 integers as follows:
            drive2
            drive3      <prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv>
            <prt>       is the base of the parallel port address for
                        the corresponding drive.  (required)

            <pro>       is the protocol number for the adapter that
                        supports this drive.  These numbers are
                        logged by 'paride' when the protocol modules
                        are initialised.  (0 if not given)

            <uni>       for those adapters that support chained
                        devices, this is the unit selector for the
                        chain of devices on the given port.  It should
                        be zero for devices that don't support chaining.
                        (0 if not given)

            <mod>       this can be -1 to choose the best mode, or one
                        of the mode numbers supported by the adapter.
                        (-1 if not given)

            <geo>       this defaults to 0 to indicate that the driver
                        should use the CHS geometry provided by the drive
                        itself.  If set to 1, the driver will provide
                        a logical geometry with 64 heads and 32 sectors
                        per track, to be consistent with most SCSI
                        drivers.  (0 if not given)

            <sby>       set this to zero to disable the power saving
                        standby mode, if needed.  (1 if not given)

            <dly>       some parallel ports require the driver to
                        go more slowly.  -1 sets a default value that
                        should work with the chosen protocol.  Otherwise,
                        set this to a small integer: the larger it is,
                        the slower the port i/o.  In some cases, setting
                        this to zero will speed up the device.  (default -1)

            <slv>       IDE disks can be jumpered to master or slave.
                        Set this to 0 to choose the master drive, 1 to
                        choose the slave, or -1 (the default) to choose
                        the first one found.
            major       You may use this parameter to override the
                        default major number (45) that this driver
                        will use.  Be sure to change the device
                        special files as well.

            name        This parameter is a character string that
                        contains the name the kernel will use for this
                        device (in /proc output, for instance).
            cluster     The driver will attempt to aggregate requests
                        for adjacent blocks into larger multi-block
                        clusters.  The maximum cluster size (in 512
                        byte sectors) is set with this parameter.
                        (default 64)

            verbose     This parameter controls the amount of logging
                        that the driver will do.  Set it to 0 for
                        normal operation, 1 to see autoprobe progress
                        messages, or 2 to see additional debugging
                        output.

            nice        This parameter controls the driver's use of
                        idle CPU time, at the expense of some speed.
        If this driver is built into the kernel, you can use the
        following kernel command line parameters, with the same values
        as the corresponding module parameters listed above:

            pd.drive0
            pd.drive1
            pd.drive2
            pd.drive3
            pd.cluster
            pd.nice

        In addition, you can use the parameter pd.disable to disable
        the driver entirely.
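        For example (the port address and protocol number here are
        illustrative only and depend on your adapter), a single drive
        on an adapter at 0x378 using protocol 0 might be loaded with:

            insmod pd drive0=0x378,0,0,-1,0,1,-1,-1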
        1.01    GRG 1997.01.24  Restored pd_reset()
        1.02    GRG 1998.05.06  SMP spinlock changes,
        1.03    GRG 1998.06.16  Eliminate an Ugh.
        1.04    GRG 1998.08.15  Extra debugging, use HZ in loop timing
        1.05    GRG 1998.09.24  Added jumbo support

*/

#define PD_VERSION      "1.05"
/* Here are things one can override from the insmod command.
   Most are autoprobed by paride unless set here.  Verbose is off
   by default.
*/
static int      verbose = 0;
static int      major = PD_MAJOR;
static char     *name = PD_NAME;
static int      cluster = 64;
static int      nice = 0;
static int      disable = 0;
static int drive0[8] = {0,0,0,-1,0,1,-1,-1};
static int drive1[8] = {0,0,0,-1,0,1,-1,-1};
static int drive2[8] = {0,0,0,-1,0,1,-1,-1};
static int drive3[8] = {0,0,0,-1,0,1,-1,-1};

static int (*drives[4])[8] = {&drive0,&drive1,&drive2,&drive3};
static int pd_drive_count;
#define DU              (*drives[unit])
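/* DU dereferences drives[unit], yielding the int[8] parameter array
   (drive0..drive3) for the current unit */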
/* end of parameters */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>        /* for the eject ioctl */
#include <linux/spinlock.h>

#include <asm/uaccess.h>
static STT pd_stt[7] = {{"drive0",8,drive0},
                        {"drive1",8,drive1},
                        {"drive2",8,drive2},
                        {"drive3",8,drive3},
                        {"disable",1,&disable},
                        {"cluster",1,&cluster},
                        {"nice",1,&nice}};

void pd_setup( char *str, int *ints )

{       generic_setup(pd_stt,7,str);
}
MODULE_PARM(verbose,"i");
MODULE_PARM(major,"i");
MODULE_PARM(name,"s");
MODULE_PARM(cluster,"i");
MODULE_PARM(nice,"i");
MODULE_PARM(drive0,"1-8i");
MODULE_PARM(drive1,"1-8i");
MODULE_PARM(drive2,"1-8i");
MODULE_PARM(drive3,"1-8i");
/* set up defines for blk.h,  why don't all drivers do it this way ? */

#define MAJOR_NR   major
#define DEVICE_NAME "PD"
#define DEVICE_REQUEST do_pd_request
#define DEVICE_NR(device) (MINOR(device)>>PD_BITS)
#define DEVICE_ON(device)
#define DEVICE_OFF(device)

#include <linux/blk.h>
#include <linux/blkpg.h>

#define PD_PARTNS       (1<<PD_BITS)
#define PD_DEVS         PD_PARTNS*PD_UNITS
/* numbers for "SCSI" geometry */

#define PD_LOG_HEADS    64
#define PD_LOG_SECTS    32

#define PD_MAX_RETRIES  5
#define PD_TMO          800             /* interrupt timeout in jiffies */
#define PD_SPIN_DEL     50              /* spin delay in micro-seconds  */

#define PD_SPIN         (1000000*PD_TMO)/(HZ*PD_SPIN_DEL)
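/* For illustration only (assuming HZ=100): PD_TMO is 800 jiffies, i.e.
   8 seconds, and PD_SPIN = (1000000*800)/(100*50) = 160000 polls of
   PD_SPIN_DEL (50 us) each -- the same 8 second budget for the polled
   wait loop in pd_wait_for(). */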
#define STAT_ERR        0x00001
#define STAT_INDEX      0x00002
#define STAT_ECC        0x00004
#define STAT_DRQ        0x00008
#define STAT_SEEK       0x00010
#define STAT_WRERR      0x00020
#define STAT_READY      0x00040
#define STAT_BUSY       0x00080

#define ERR_AMNF        0x00100
#define ERR_TK0NF       0x00200
#define ERR_ABRT        0x00400
#define ERR_MCR         0x00800
#define ERR_IDNF        0x01000
#define ERR_MC          0x02000
#define ERR_UNC         0x04000
#define ERR_TMO         0x10000

#define IDE_READ                0x20
#define IDE_WRITE               0x30
#define IDE_READ_VRFY           0x40
#define IDE_INIT_DEV_PARMS      0x91
#define IDE_STANDBY             0x96
#define IDE_ACKCHANGE           0xdb
#define IDE_DOORLOCK            0xde
#define IDE_DOORUNLOCK          0xdf
#define IDE_IDENTIFY            0xec
#define IDE_EJECT               0xed
void pd_setup(char * str, int * ints);
void cleanup_module( void );

static int pd_open(struct inode *inode, struct file *file);
static void do_pd_request(request_queue_t * q);
static int pd_ioctl(struct inode *inode,struct file *file,
                    unsigned int cmd, unsigned long arg);
static int pd_release (struct inode *inode, struct file *file);
static int pd_revalidate(kdev_t dev);
static int pd_detect(void);
static void do_pd_read(void);
static void do_pd_read_start(void);
static void do_pd_write(void);
static void do_pd_write_start(void);
static void do_pd_read_drq( void );
static void do_pd_write_done( void );

static int pd_identify (int unit);
static void pd_media_check(int unit);
static void pd_doorlock(int unit, int func);
static int pd_check_media(kdev_t dev);
static void pd_eject( int unit);

static struct hd_struct pd_hd[PD_DEVS];
static int pd_sizes[PD_DEVS];
static int pd_blocksizes[PD_DEVS];
struct pd_unit {
        struct pi_adapter pia;          /* interface to paride layer */
        struct pi_adapter *pi;
        int access;                     /* count of active opens ... */
        int capacity;                   /* Size of this volume in sectors */
        int heads;                      /* physical geometry */
        int sectors;
        int cylinders;
        int drive;                      /* master=0 slave=1 */
        int changed;                    /* Have we seen a disk change ? */
        int removable;                  /* removable media device ? */
        int standby;
        int alt_geom;
        int present;
        char name[PD_NAMELEN];          /* pda, pdb, etc ... */
};

struct pd_unit pd[PD_UNITS];
/* 'unit' must be defined in all functions - either as a local or a param */

#define PD pd[unit]
#define PI PD.pi

static int pd_valid = 1;                /* serialise partition checks */
static char pd_scratch[512];            /* scratch block buffer */
/* the variables below are used mainly in the I/O request engine, which
   processes only one request at a time.
*/

static int pd_retries = 0;              /* i/o error retry count */
static int pd_busy = 0;                 /* request being processed ? */
static int pd_block;                    /* address of next requested block */
static int pd_count;                    /* number of blocks still to do */
static int pd_run;                      /* sectors in current cluster */
static int pd_cmd;                      /* current command READ/WRITE */
static int pd_unit;                     /* unit of current request */
static int pd_dev;                      /* minor of current request */
static int pd_poffs;                    /* partition offset of current minor */
static char * pd_buf;                   /* buffer for request in progress */

static DECLARE_WAIT_QUEUE_HEAD(pd_wait_open);

static char *pd_errs[17] = { "ERR","INDEX","ECC","DRQ","SEEK","WRERR",
                             "READY","BUSY","AMNF","TK0NF","ABRT","MCR",
                             "IDNF","MC","UNC","???","TMO"};
/* kernel glue structures */

extern struct block_device_operations pd_fops;

static struct gendisk pd_gendisk = {
        major:          PD_MAJOR,       /* Major number */
        major_name:     PD_NAME,        /* Major name */
        minor_shift:    PD_BITS,        /* Bits to shift to get real from partition */
        max_p:          PD_PARTNS,      /* Number of partitions per real */
        part:           pd_hd,          /* hd struct */
        sizes:          pd_sizes,       /* block sizes */
        fops:           &pd_fops,       /* block device operations */
};

static struct block_device_operations pd_fops = {
        open:                   pd_open,
        release:                pd_release,
        ioctl:                  pd_ioctl,
        check_media_change:     pd_check_media,
        revalidate:             pd_revalidate
};
void pd_init_units( void )

{       int     unit, j;

        for (unit=0;unit<PD_UNITS;unit++) {
                PD.drive = DU[D_SLV];
                j = 0;
                while ((j < PD_NAMELEN-2) && (PD.name[j]=name[j])) j++;
                PD.name[j++] = 'a' + unit;
                PD.name[j] = 0;
                PD.alt_geom = DU[D_GEO];
                PD.standby = DU[D_SBY];
                if (DU[D_PRT]) pd_drive_count++;
        }
}
static inline int pd_new_segment(request_queue_t *q, struct request *req, int max_segments)

{       if (max_segments > cluster)
                max_segments = cluster;

        if (req->nr_segments < max_segments) {
                req->nr_segments++;
                q->elevator.nr_segments++;
                return 1;
        }
        return 0;
}
static int pd_back_merge_fn(request_queue_t *q, struct request *req,
                            struct buffer_head *bh, int max_segments)

{       if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
                return 1;
        return pd_new_segment(q, req, max_segments);
}
static int pd_front_merge_fn(request_queue_t *q, struct request *req,
                             struct buffer_head *bh, int max_segments)

{       if (bh->b_data + bh->b_size == req->bh->b_data)
                return 1;
        return pd_new_segment(q, req, max_segments);
}
static int pd_merge_requests_fn(request_queue_t *q, struct request *req,
                                struct request *next, int max_segments)

{       int total_segments = req->nr_segments + next->nr_segments;
        int same_segment = 0;

        if (max_segments > cluster)
                max_segments = cluster;

        if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) {
                total_segments--;
                same_segment = 1;
        }

        if (total_segments > max_segments)
                return 0;

        q->elevator.nr_segments -= same_segment;
        req->nr_segments = total_segments;
        return 1;
}
int pd_init (void)

{       int i;
        request_queue_t * q;

        if (disable) return -1;
        if (devfs_register_blkdev(MAJOR_NR,name,&pd_fops)) {
                printk("%s: unable to get major number %d\n",
                        name,major);
                return -1;
        }
        q = BLK_DEFAULT_QUEUE(MAJOR_NR);
        blk_init_queue(q, DEVICE_REQUEST);
        q->back_merge_fn = pd_back_merge_fn;
        q->front_merge_fn = pd_front_merge_fn;
        q->merge_requests_fn = pd_merge_requests_fn;
        read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */

        pd_gendisk.major = major;
        pd_gendisk.major_name = name;
        pd_gendisk.next = gendisk_head;
        gendisk_head = &pd_gendisk;

        for(i=0;i<PD_DEVS;i++) pd_blocksizes[i] = 1024;
        blksize_size[MAJOR_NR] = pd_blocksizes;

        printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
                name,name,PD_VERSION,major,cluster,nice);
        pd_init_units();
        pd_gendisk.nr_real = pd_detect();

        if (!pd_gendisk.nr_real) {
                cleanup_module();
                return -1;
        }

        return 0;
}
static int pd_open (struct inode *inode, struct file *file)

{       int unit = DEVICE_NR(inode->i_rdev);

        if ((unit >= PD_UNITS) || (!PD.present)) return -ENODEV;

        wait_event (pd_wait_open, pd_valid);

        PD.access++;

        if (PD.removable) {
                pd_media_check(unit);
                pd_doorlock(unit,IDE_DOORLOCK);
        }
        return 0;
}
static int pd_ioctl(struct inode *inode,struct file *file,
                    unsigned int cmd, unsigned long arg)

{       struct hd_geometry *geo = (struct hd_geometry *) arg;
        int dev, err, unit;

        if ((!inode) || (!inode->i_rdev)) return -EINVAL;
        dev = MINOR(inode->i_rdev);
        unit = DEVICE_NR(inode->i_rdev);
        if (dev >= PD_DEVS) return -EINVAL;
        if (!PD.present) return -ENODEV;

        switch (cmd) {
            case CDROMEJECT:
                if (PD.access == 1) pd_eject(unit);
                return 0;
            case HDIO_GETGEO:
                if (!geo) return -EINVAL;
                err = verify_area(VERIFY_WRITE,geo,sizeof(*geo));
                if (err) return err;
                if (PD.alt_geom) {
                        put_user(PD.capacity/(PD_LOG_HEADS*PD_LOG_SECTS),
                                (short *) &geo->cylinders);
                        put_user(PD_LOG_HEADS, (char *) &geo->heads);
                        put_user(PD_LOG_SECTS, (char *) &geo->sectors);
                } else {
                        put_user(PD.cylinders, (short *) &geo->cylinders);
                        put_user(PD.heads, (char *) &geo->heads);
                        put_user(PD.sectors, (char *) &geo->sectors);
                }
                put_user(pd_hd[dev].start_sect,(long *)&geo->start);
                return 0;
            case BLKGETSIZE:
                if (!arg) return -EINVAL;
                err = verify_area(VERIFY_WRITE,(long *) arg,sizeof(long));
                if (err) return (err);
                put_user(pd_hd[dev].nr_sects,(long *) arg);
                return 0;
            case BLKRRPART:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                return pd_revalidate(inode->i_rdev);
            default:
                return blk_ioctl(inode->i_rdev, cmd, arg);
        }
}
static int pd_release (struct inode *inode, struct file *file)

{       kdev_t devp;
        int unit;

        devp = inode->i_rdev;
        unit = DEVICE_NR(devp);

        if ((unit >= PD_UNITS) || (PD.access <= 0))
                return -EINVAL;

        PD.access--;

        if (!PD.access && PD.removable)
                pd_doorlock(unit,IDE_DOORUNLOCK);

        return 0;
}
static int pd_check_media( kdev_t dev)

{       int r, unit;

        unit = DEVICE_NR(dev);
        if ((unit >= PD_UNITS) || (!PD.present)) return -ENODEV;
        if (!PD.removable) return 0;
        pd_media_check(unit);
        r = PD.changed;
        PD.changed = 0;
        return r;
}
static int pd_revalidate(kdev_t dev)

{       int p, unit, minor;
        long flags;
        kdev_t devp;

        struct super_block *sb;

        unit = DEVICE_NR(dev);
        if ((unit >= PD_UNITS) || (!PD.present)) return -ENODEV;

        save_flags(flags);
        cli();
        if (PD.access > 1) {
                restore_flags(flags);
                return -EBUSY;
        }
        pd_valid = 0;
        restore_flags(flags);

        for (p=(PD_PARTNS-1);p>=0;p--) {
                minor = p + unit*PD_PARTNS;
                devp = MKDEV(MAJOR_NR, minor);

                sb = get_super(devp);
                if (sb) invalidate_inodes(sb);

                invalidate_buffers(devp);
                pd_hd[minor].start_sect = 0;
                pd_hd[minor].nr_sects = 0;
        }

        if (pd_identify(unit))
                grok_partitions(&pd_gendisk,unit,1<<PD_BITS,PD.capacity);

        pd_valid = 1;
        wake_up(&pd_wait_open);

        return 0;
}
/* Glue for modules ... */

void    cleanup_module(void);

int     init_module(void)

{       extern paride_init();

        paride_init();
        return pd_init();
}
void    cleanup_module(void)

{       struct gendisk **gdp;
        int unit;

        devfs_unregister_blkdev(MAJOR_NR,name);
        for(gdp=&gendisk_head;*gdp;gdp=&((*gdp)->next))
                if (*gdp == &pd_gendisk) break;
        if (*gdp) *gdp = (*gdp)->next;

        for (unit=0;unit<PD_UNITS;unit++)
                if (PD.present) pi_release(PI);
}
#define WR(c,r,v)       pi_write_regr(PI,c,r,v)
#define RR(c,r)         (pi_read_regr(PI,c,r))

#define DRIVE           (0xa0+0x10*PD.drive)
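/* DRIVE is the ATA drive/head select value written to register 6:
   0xa0 selects the master, 0xb0 the slave */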
/* ide command interface */

static void pd_print_error( int unit, char * msg, int status )

{       int     i;

        printk("%s: %s: status = 0x%x =",PD.name,msg,status);
        for(i=0;i<18;i++) if (status & (1<<i)) printk(" %s",pd_errs[i]);
        printk("\n");
}
static void pd_reset( int unit )        /* called only for MASTER drive */

#define DBMSG(msg)      ((verbose>1)?(msg):NULL)
static int pd_wait_for( int unit, int w, char * msg )    /* polled wait */

{       int     k, r, e;

        k = 0;
        while (k < PD_SPIN) {
                r = RR(0,7);
                k++;
                if (((r & w) == w) && !(r & STAT_BUSY)) break;
                udelay(PD_SPIN_DEL);
        }
        e = (RR(0,1)<<8) + RR(0,7);
        if (k >= PD_SPIN) e |= ERR_TMO;
        if ((e & (STAT_ERR|ERR_TMO)) && (msg != NULL))
                pd_print_error(unit,msg,e);
        return e;
}
static void pd_send_command( int unit, int n, int s, int h,
                             int c0, int c1, int func )

{       WR(0,6,DRIVE+h);
        WR(0,1,0);              /* the IDE task file */
        WR(0,2,n);
        WR(0,3,s);
        WR(0,4,c0);
        WR(0,5,c1);
        WR(0,7,func);
}
static void pd_ide_command( int unit, int func, int block, int count )

/* Don't use this call if the capacity is zero. */
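/* Worked example (illustrative values): for a drive reporting 63
   sectors/track and 16 heads, block 10000 maps to sector
   (10000 % 63) + 1 = 47, head (10000 / 63) % 16 = 14, and cylinder
   10000 / (63*16) = 9. */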
{       int     c1, c0, h, s;

        s  = ( block % PD.sectors) + 1;
        h  = ( block / PD.sectors) % PD.heads;
        c0 = ( block / (PD.sectors*PD.heads)) % 256;
        c1 = ( block / (PD.sectors*PD.heads*256));

        pd_send_command(unit,count,s,h,c0,c1,func);
}
/* According to the ATA standard, the default CHS geometry should be
   available following a reset.  Some Western Digital drives come up
   in a mode where only LBA addresses are accepted until the device
   parameters are initialised.
*/

static void pd_init_dev_parms( int unit )

{       pd_wait_for(unit,0,DBMSG("before init_dev_parms"));
        pd_send_command(unit,PD.sectors,0,PD.heads-1,0,0,IDE_INIT_DEV_PARMS);
        pd_wait_for(unit,0,"Initialise device parameters");
}
static void pd_doorlock( int unit, int func )

{       if (pd_wait_for(unit,STAT_READY,"Lock") & STAT_ERR) {
                return;
        }
        pd_send_command(unit,1,0,0,0,0,func);
        pd_wait_for(unit,STAT_READY,"Lock done");
}
static void pd_eject( int unit )

{       pd_wait_for(unit,0,DBMSG("before unlock on eject"));
        pd_send_command(unit,1,0,0,0,0,IDE_DOORUNLOCK);
        pd_wait_for(unit,0,DBMSG("after unlock on eject"));
        pd_wait_for(unit,0,DBMSG("before eject"));
        pd_send_command(unit,0,0,0,0,0,IDE_EJECT);
        pd_wait_for(unit,0,DBMSG("after eject"));
}
static void pd_media_check( int unit )

{       int     r;

        r = pd_wait_for(unit,STAT_READY,DBMSG("before media_check"));
        if (!(r & STAT_ERR)) {
                pd_send_command(unit,1,1,0,0,0,IDE_READ_VRFY);
                r = pd_wait_for(unit,STAT_READY,DBMSG("RDY after READ_VRFY"));
        } else PD.changed = 1;          /* say changed if other error */

        if (r & ERR_MC) {
                PD.changed = 1;
                pd_send_command(unit,1,0,0,0,0,IDE_ACKCHANGE);
                pd_wait_for(unit,STAT_READY,DBMSG("RDY after ACKCHANGE"));
                pd_send_command(unit,1,1,0,0,0,IDE_READ_VRFY);
                r = pd_wait_for(unit,STAT_READY,DBMSG("RDY after VRFY"));
        }
}
static void pd_standby_off( int unit )

{       pd_wait_for(unit,0,DBMSG("before STANDBY"));
        pd_send_command(unit,0,0,0,0,0,IDE_STANDBY);
        pd_wait_for(unit,0,DBMSG("after STANDBY"));
}
#define word_val(n)     ((pd_scratch[2*n]&0xff)+256*(pd_scratch[2*n+1]&0xff))
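/* word_val(n) reassembles 16-bit word n of the IDENTIFY data in
   pd_scratch from its two little-endian bytes */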
static int pd_identify( int unit )

{       int     j;
        char id[PD_ID_LEN+1];

/* WARNING: here there may be dragons.  reset() applies to both drives,
   but we call it only on probing the MASTER.  This should allow most
   common configurations to work, but be warned that a reset can clear
   settings on the SLAVE drive.
*/

        if (PD.drive == 0) pd_reset(unit);

        pd_wait_for(unit,0,DBMSG("before IDENT"));
        pd_send_command(unit,1,0,0,0,0,IDE_IDENTIFY);

        if (pd_wait_for(unit,STAT_DRQ,DBMSG("IDENT DRQ")) & STAT_ERR) {
                return 0;
        }
        pi_read_block(PI,pd_scratch,512);

        PD.sectors = word_val(6);
        PD.heads = word_val(3);
        PD.cylinders = word_val(1);
        PD.capacity = PD.sectors*PD.heads*PD.cylinders;

        for(j=0;j<PD_ID_LEN;j++) id[j^1] = pd_scratch[j+PD_ID_OFF];
        j = PD_ID_LEN-1;
        while ((j >= 0) && (id[j] <= 0x20)) j--;
        id[j+1] = 0;

        PD.removable = (word_val(0) & 0x80);

        printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n",
                PD.name,id,
                PD.drive?"slave":"master",
                PD.capacity,PD.capacity/2048,
                PD.cylinders,PD.heads,PD.sectors,
                PD.removable?"removable":"fixed");

        if (PD.capacity) pd_init_dev_parms(unit);
        if (!PD.standby) pd_standby_off(unit);

        return 1;
}
static int pd_probe_drive( int unit )

{       if (PD.drive == -1) {
                for (PD.drive=0;PD.drive<=1;PD.drive++)
                        if (pd_identify(unit))
                                return 1;
                return 0;
        }
        return pd_identify(unit);
}
static int pd_detect( void )

{       int     k, unit;

        k = 0;
        if (pd_drive_count == 0) {      /* nothing spec'd - so autoprobe for 1 */
                unit = 0;
                if (pi_init(PI,1,-1,-1,-1,-1,-1,pd_scratch,
                            PI_PD,verbose,PD.name)) {
                        if (pd_probe_drive(unit)) {
                                PD.present = 1;
                                k = 1;
                        } else pi_release(PI);
                }

        } else for (unit=0;unit<PD_UNITS;unit++) if (DU[D_PRT])
                if (pi_init(PI,0,DU[D_PRT],DU[D_MOD],DU[D_UNI],
                            DU[D_PRO],DU[D_DLY],pd_scratch,
                            PI_PD,verbose,PD.name)) {
                        if (pd_probe_drive(unit)) {
                                PD.present = 1;
                                k = unit+1;
                        } else pi_release(PI);
                }

        for (unit=0;unit<PD_UNITS;unit++)
                register_disk(&pd_gendisk,MKDEV(MAJOR_NR,unit<<PD_BITS),
                                PD_PARTNS,&pd_fops,
                                PD.present?PD.capacity:0);

/* We lie about the number of drives found, as the generic partition
   scanner assumes that the drives are numbered sequentially from 0.
   This can result in some bogus error messages if non-sequential
   drive numbers are used.
*/
        if (k)
                return k;

        printk("%s: no valid drive found\n",name);
        return 0;
}
/* The i/o request engine */

static int pd_ready( void )

{       int unit = pd_unit;

        return (!(RR(1,6) & STAT_BUSY)) ;
}
static void do_pd_request (request_queue_t * q)

{       struct buffer_head * bh;
        int     unit;

        if (pd_busy) return;
repeat:
        if (QUEUE_EMPTY || (CURRENT->rq_status == RQ_INACTIVE)) return;

        pd_dev = MINOR(CURRENT->rq_dev);
        pd_unit = unit = DEVICE_NR(CURRENT->rq_dev);
        pd_block = CURRENT->sector;
        pd_run = CURRENT->nr_sectors;
        pd_count = CURRENT->current_nr_sectors;

        if ((pd_dev >= PD_DEVS) ||
            ((pd_block+pd_count) > pd_hd[pd_dev].nr_sects)) {
                end_request(0);
                goto repeat;
        }

        pd_cmd = CURRENT->cmd;
        pd_poffs = pd_hd[pd_dev].start_sect;
        pd_block += pd_poffs;
        pd_buf = CURRENT->buffer;
        pd_retries = 0;

        pd_busy = 1;
        if (pd_cmd == READ) pi_do_claimed(PI,do_pd_read);
        else if (pd_cmd == WRITE) pi_do_claimed(PI,do_pd_write);
}
)
965 spin_lock_irqsave(&io_request_lock
,saved_flags
);
967 if (!pd_run
) { spin_unlock_irqrestore(&io_request_lock
,saved_flags
);
974 (CURRENT
->cmd
!= pd_cmd
) ||
975 (MINOR(CURRENT
->rq_dev
) != pd_dev
) ||
976 (CURRENT
->rq_status
== RQ_INACTIVE
) ||
977 (CURRENT
->sector
+pd_poffs
!= pd_block
))
978 printk("%s: OUCH: request list changed unexpectedly\n",
981 pd_count
= CURRENT
->current_nr_sectors
;
982 pd_buf
= CURRENT
->buffer
;
983 spin_unlock_irqrestore(&io_request_lock
,saved_flags
);
static void do_pd_read( void )

{       ps_set_intr(do_pd_read_start,0,0,nice);
}

static void do_pd_read_start( void )

{       int     unit = pd_unit;
        long    saved_flags;

        if (pd_wait_for(unit,STAT_READY,"do_pd_read") & STAT_ERR) {
                if (pd_retries < PD_MAX_RETRIES) {
                        pd_retries++;
                        pi_do_claimed(PI,do_pd_read_start);
                        return;
                }
                spin_lock_irqsave(&io_request_lock,saved_flags);
                end_request(0);
                pd_busy = 0;
                do_pd_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                return;
        }
        pd_ide_command(unit,IDE_READ,pd_block,pd_run);
        ps_set_intr(do_pd_read_drq,pd_ready,PD_TMO,nice);
}

static void do_pd_read_drq( void )

{       int     unit = pd_unit;
        long    saved_flags;

        while (1) {
                if (pd_wait_for(unit,STAT_DRQ,"do_pd_read_drq") & STAT_ERR) {
                        if (pd_retries < PD_MAX_RETRIES) {
                                pd_retries++;
                                pi_do_claimed(PI,do_pd_read_start);
                                return;
                        }
                        spin_lock_irqsave(&io_request_lock,saved_flags);
                        end_request(0);
                        pd_busy = 0;
                        do_pd_request(NULL);
                        spin_unlock_irqrestore(&io_request_lock,saved_flags);
                        return;
                }
                pi_read_block(PI,pd_buf,512);
                pd_count--; pd_run--;
                pd_buf += 512;
                pd_block++;
                if (!pd_run) break;
                if (!pd_count) pd_next_buf(unit);
        }
        spin_lock_irqsave(&io_request_lock,saved_flags);
        end_request(1);
        pd_busy = 0;
        do_pd_request(NULL);
        spin_unlock_irqrestore(&io_request_lock,saved_flags);
}
static void do_pd_write( void )

{       ps_set_intr(do_pd_write_start,0,0,nice);
}

static void do_pd_write_start( void )

{       int     unit = pd_unit;
        long    saved_flags;

        if (pd_wait_for(unit,STAT_READY,"do_pd_write") & STAT_ERR) {
                if (pd_retries < PD_MAX_RETRIES) {
                        pd_retries++;
                        pi_do_claimed(PI,do_pd_write_start);
                        return;
                }
                spin_lock_irqsave(&io_request_lock,saved_flags);
                end_request(0);
                pd_busy = 0;
                do_pd_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                return;
        }
        pd_ide_command(unit,IDE_WRITE,pd_block,pd_run);

        while (1) {
                if (pd_wait_for(unit,STAT_DRQ,"do_pd_write_drq") & STAT_ERR) {
                        if (pd_retries < PD_MAX_RETRIES) {
                                pd_retries++;
                                pi_do_claimed(PI,do_pd_write_start);
                                return;
                        }
                        spin_lock_irqsave(&io_request_lock,saved_flags);
                        end_request(0);
                        pd_busy = 0;
                        do_pd_request(NULL);
                        spin_unlock_irqrestore(&io_request_lock,saved_flags);
                        return;
                }
                pi_write_block(PI,pd_buf,512);
                pd_count--; pd_run--;
                pd_buf += 512;
                pd_block++;
                if (!pd_run) break;
                if (!pd_count) pd_next_buf(unit);
        }
        ps_set_intr(do_pd_write_done,pd_ready,PD_TMO,nice);
}

static void do_pd_write_done( void )

{       int     unit = pd_unit;
        long    saved_flags;

        if (pd_wait_for(unit,STAT_READY,"do_pd_write_done") & STAT_ERR) {
                if (pd_retries < PD_MAX_RETRIES) {
                        pd_retries++;
                        pi_do_claimed(PI,do_pd_write_start);
                        return;
                }
                spin_lock_irqsave(&io_request_lock,saved_flags);
                end_request(0);
                pd_busy = 0;
                do_pd_request(NULL);
                spin_unlock_irqrestore(&io_request_lock,saved_flags);
                return;
        }
        spin_lock_irqsave(&io_request_lock,saved_flags);
        end_request(1);
        pd_busy = 0;
        do_pd_request(NULL);
        spin_unlock_irqrestore(&io_request_lock,saved_flags);
}