1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2002 by Alan Korr
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
33 #include "ata_idle_notify.h"
34 #include "ata-target.h"
37 #define SECTOR_SIZE (512)
39 #define ATA_FEATURE ATA_ERROR
41 #define ATA_STATUS ATA_COMMAND
42 #define ATA_ALT_STATUS ATA_CONTROL
44 #define SELECT_DEVICE1 0x10
45 #define SELECT_LBA 0x40
47 #define CONTROL_nIEN 0x02
48 #define CONTROL_SRST 0x04
50 #define CMD_READ_SECTORS 0x20
51 #define CMD_WRITE_SECTORS 0x30
52 #define CMD_WRITE_SECTORS_EXT 0x34
53 #define CMD_READ_MULTIPLE 0xC4
54 #define CMD_READ_MULTIPLE_EXT 0x29
55 #define CMD_WRITE_MULTIPLE 0xC5
56 #define CMD_WRITE_MULTIPLE_EXT 0x39
57 #define CMD_SET_MULTIPLE_MODE 0xC6
58 #define CMD_STANDBY_IMMEDIATE 0xE0
59 #define CMD_STANDBY 0xE2
60 #define CMD_IDENTIFY 0xEC
61 #define CMD_SLEEP 0xE6
62 #define CMD_SET_FEATURES 0xEF
63 #define CMD_SECURITY_FREEZE_LOCK 0xF5
65 #define CMD_READ_DMA 0xC8
66 #define CMD_READ_DMA_EXT 0x25
67 #define CMD_WRITE_DMA 0xCA
68 #define CMD_WRITE_DMA_EXT 0x35
71 /* Should all be < 0x100 (which are reserved for control messages) */
75 #define READWRITE_TIMEOUT 5*HZ
77 #ifdef HAVE_ATA_POWER_OFF
78 #define ATA_POWER_OFF_TIMEOUT 2*HZ
81 #ifdef ATA_DRIVER_CLOSE
82 static unsigned int ata_thread_id
= 0;
85 #if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
86 /* Hack - what's the deal with 5g? */
89 struct thread_entry
*thread
;
91 volatile unsigned char locked
;
92 IF_COP( struct corelock cl
; )
95 static void ata_lock_init(struct ata_lock
*l
)
97 corelock_init(&l
->cl
);
103 static void ata_lock_lock(struct ata_lock
*l
)
105 struct thread_entry
* const current
=
106 thread_id_entry(THREAD_ID_CURRENT
);
108 if (current
== l
->thread
)
114 corelock_lock(&l
->cl
);
116 IF_PRIO( current
->skip_count
= -1; )
118 while (l
->locked
!= 0)
120 corelock_unlock(&l
->cl
);
122 corelock_lock(&l
->cl
);
127 corelock_unlock(&l
->cl
);
130 static void ata_lock_unlock(struct ata_lock
*l
)
138 corelock_lock(&l
->cl
);
140 IF_PRIO( l
->thread
->skip_count
= 0; )
145 corelock_unlock(&l
->cl
);
148 #define mutex ata_lock
149 #define mutex_init ata_lock_init
150 #define mutex_lock ata_lock_lock
151 #define mutex_unlock ata_lock_unlock
152 #endif /* MAX_PHYS_SECTOR_SIZE */
154 #if defined(HAVE_USBSTACK) && defined(USE_ROCKBOX_USB)
155 #define ALLOW_USB_SPINDOWN
158 static struct mutex ata_mtx SHAREDBSS_ATTR
;
159 static int ata_device
; /* device 0 (master) or 1 (slave) */
161 static int spinup_time
= 0;
162 #if (CONFIG_LED == LED_REAL)
163 static bool ata_led_enabled
= true;
164 static bool ata_led_on
= false;
166 static bool spinup
= false;
167 static bool sleeping
= true;
168 static bool poweroff
= false;
169 static long sleep_timeout
= 5*HZ
;
171 static bool lba48
= false; /* set for 48 bit addressing */
173 static long ata_stack
[(DEFAULT_STACK_SIZE
*3)/sizeof(long)];
174 static const char ata_thread_name
[] = "ata";
175 static struct event_queue ata_queue SHAREDBSS_ATTR
;
176 static bool initialized
= false;
178 static long last_user_activity
= -1;
179 static long last_disk_activity
= -1;
181 static unsigned long total_sectors
;
182 static int multisectors
; /* number of supported multisectors */
183 static unsigned short identify_info
[SECTOR_SIZE
/2];
185 #ifdef MAX_PHYS_SECTOR_SIZE
187 struct sector_cache_entry
{
189 unsigned long sectornum
; /* logical sector */
190 unsigned char data
[MAX_PHYS_SECTOR_SIZE
];
192 /* buffer for reading and writing large physical sectors */
194 static struct sector_cache_entry sector_cache
;
195 static int phys_sector_mult
= 1;
199 static int dma_mode
= 0;
202 static int ata_power_on(void);
203 static int perform_soft_reset(void);
204 static int set_multiple_mode(int sectors
);
205 static int set_features(void);
207 STATICIRAM ICODE_ATTR
int wait_for_bsy(void)
209 long timeout
= current_tick
+ HZ
*30;
213 if (!(ATA_STATUS
& STATUS_BSY
))
215 last_disk_activity
= current_tick
;
217 } while (TIME_BEFORE(current_tick
, timeout
));
219 return 0; /* timeout */
222 STATICIRAM ICODE_ATTR
int wait_for_rdy(void)
229 timeout
= current_tick
+ HZ
*10;
233 if (ATA_ALT_STATUS
& STATUS_RDY
)
235 last_disk_activity
= current_tick
;
237 } while (TIME_BEFORE(current_tick
, timeout
));
239 return 0; /* timeout */
242 STATICIRAM ICODE_ATTR
int wait_for_start_of_transfer(void)
247 return (ATA_ALT_STATUS
& (STATUS_BSY
|STATUS_DRQ
)) == STATUS_DRQ
;
250 STATICIRAM ICODE_ATTR
int wait_for_end_of_transfer(void)
254 return (ATA_ALT_STATUS
&
255 (STATUS_BSY
|STATUS_RDY
|STATUS_DF
|STATUS_DRQ
|STATUS_ERR
))
259 #if (CONFIG_LED == LED_REAL)
260 /* Conditionally block LED access for the ATA driver, so the LED can be
261 * (mis)used for other purposes */
262 static void ata_led(bool on
)
269 #define ata_led(on) led(on)
272 #ifndef ATA_OPTIMIZED_READING
273 STATICIRAM ICODE_ATTR
void copy_read_sectors(unsigned char* buf
, int wordcount
)
275 unsigned short tmp
= 0;
277 if ( (unsigned long)buf
& 1)
278 { /* not 16-bit aligned, copy byte by byte */
279 unsigned char* bufend
= buf
+ wordcount
*2;
283 #if defined(SWAP_WORDS) || defined(ROCKBOX_LITTLE_ENDIAN)
284 *buf
++ = tmp
& 0xff; /* I assume big endian */
285 *buf
++ = tmp
>> 8; /* and don't use the SWAB16 macro */
290 } while (buf
< bufend
); /* tail loop is faster */
293 { /* 16-bit aligned, can do faster copy */
294 unsigned short* wbuf
= (unsigned short*)buf
;
295 unsigned short* wbufend
= wbuf
+ wordcount
;
299 *wbuf
= swap16(ATA_DATA
);
303 } while (++wbuf
< wbufend
); /* tail loop is faster */
306 #endif /* !ATA_OPTIMIZED_READING */
308 #ifndef ATA_OPTIMIZED_WRITING
309 STATICIRAM ICODE_ATTR
void copy_write_sectors(const unsigned char* buf
,
312 if ( (unsigned long)buf
& 1)
313 { /* not 16-bit aligned, copy byte by byte */
314 unsigned short tmp
= 0;
315 const unsigned char* bufend
= buf
+ wordcount
*2;
318 #if defined(SWAP_WORDS) || defined(ROCKBOX_LITTLE_ENDIAN)
319 tmp
= (unsigned short) *buf
++;
320 tmp
|= (unsigned short) *buf
++ << 8;
321 SET_16BITREG(ATA_DATA
, tmp
);
323 tmp
= (unsigned short) *buf
++ << 8;
324 tmp
|= (unsigned short) *buf
++;
325 SET_16BITREG(ATA_DATA
, tmp
);
327 } while (buf
< bufend
); /* tail loop is faster */
330 { /* 16-bit aligned, can do faster copy */
331 unsigned short* wbuf
= (unsigned short*)buf
;
332 unsigned short* wbufend
= wbuf
+ wordcount
;
336 SET_16BITREG(ATA_DATA
, swap16(*wbuf
));
338 SET_16BITREG(ATA_DATA
, *wbuf
);
340 } while (++wbuf
< wbufend
); /* tail loop is faster */
343 #endif /* !ATA_OPTIMIZED_WRITING */
345 static int ata_transfer_sectors(unsigned long start
,
359 #ifndef MAX_PHYS_SECTOR_SIZE
360 mutex_lock(&ata_mtx
);
363 if (start
+ incount
> total_sectors
) {
368 last_disk_activity
= current_tick
;
369 spinup_start
= current_tick
;
376 if (ata_power_on()) {
382 if (perform_soft_reset()) {
389 timeout
= current_tick
+ READWRITE_TIMEOUT
;
391 SET_REG(ATA_SELECT
, ata_device
);
401 while (TIME_BEFORE(current_tick
, timeout
)) {
403 last_disk_activity
= current_tick
;
406 /* If DMA is supported and parameters are ok for DMA, use it */
407 if (dma_mode
&& ata_dma_setup(inbuf
, incount
* SECTOR_SIZE
, write
))
414 SET_REG(ATA_NSECTOR
, count
>> 8);
415 SET_REG(ATA_NSECTOR
, count
& 0xff);
416 SET_REG(ATA_SECTOR
, (start
>> 24) & 0xff); /* 31:24 */
417 SET_REG(ATA_SECTOR
, start
& 0xff); /* 7:0 */
418 SET_REG(ATA_LCYL
, 0); /* 39:32 */
419 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff); /* 15:8 */
420 SET_REG(ATA_HCYL
, 0); /* 47:40 */
421 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff); /* 23:16 */
422 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
425 SET_REG(ATA_COMMAND
, usedma
? CMD_WRITE_DMA_EXT
: CMD_WRITE_MULTIPLE_EXT
);
427 SET_REG(ATA_COMMAND
, usedma
? CMD_READ_DMA_EXT
: CMD_READ_MULTIPLE_EXT
);
429 SET_REG(ATA_COMMAND
, write
? CMD_WRITE_MULTIPLE_EXT
: CMD_READ_MULTIPLE_EXT
);
435 SET_REG(ATA_NSECTOR
, count
& 0xff); /* 0 means 256 sectors */
436 SET_REG(ATA_SECTOR
, start
& 0xff);
437 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff);
438 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff);
439 SET_REG(ATA_SELECT
, ((start
>> 24) & 0xf) | SELECT_LBA
| ata_device
);
442 SET_REG(ATA_COMMAND
, usedma
? CMD_WRITE_DMA
: CMD_WRITE_MULTIPLE
);
444 SET_REG(ATA_COMMAND
, usedma
? CMD_READ_DMA
: CMD_READ_MULTIPLE
);
446 SET_REG(ATA_COMMAND
, write
? CMD_WRITE_MULTIPLE
: CMD_READ_MULTIPLE
);
450 /* wait at least 400ns between writing command and reading status */
451 __asm__
volatile ("nop");
452 __asm__
volatile ("nop");
453 __asm__
volatile ("nop");
454 __asm__
volatile ("nop");
455 __asm__
volatile ("nop");
459 if (!ata_dma_finish())
463 perform_soft_reset();
468 spinup_time
= current_tick
- spinup_start
;
475 #endif /* HAVE_ATA_DMA */
483 if (!wait_for_start_of_transfer()) {
484 /* We have timed out waiting for RDY and/or DRQ, possibly
485 because the hard drive is shaking and has problems
486 reading the data. We have two options:
488 2) Perform a soft reset and try again.
490 We choose alternative 2.
492 perform_soft_reset();
498 spinup_time
= current_tick
- spinup_start
;
504 /* read the status register exactly once per loop */
508 if (count
>= multisectors
)
509 sectors
= multisectors
;
513 wordcount
= sectors
* SECTOR_SIZE
/ 2;
516 copy_write_sectors(buf
, wordcount
);
518 copy_read_sectors(buf
, wordcount
);
521 "Device errors encountered during READ MULTIPLE commands
522 are posted at the beginning of the block or partial block
523 transfer, but the DRQ bit is still set to one and the data
524 transfer shall take place, including transfer of corrupted
528 if ( status
& (STATUS_BSY
| STATUS_ERR
| STATUS_DF
) ) {
529 perform_soft_reset();
531 /* no point retrying IDNF, sector no. was invalid */
532 if (error
& ERROR_IDNF
)
537 buf
+= sectors
* SECTOR_SIZE
; /* Advance one chunk of sectors */
540 last_disk_activity
= current_tick
;
544 if(!ret
&& !wait_for_end_of_transfer()) {
548 perform_soft_reset();
550 /* no point retrying IDNF, sector no. was invalid */
551 if (error
& ERROR_IDNF
)
560 #ifndef MAX_PHYS_SECTOR_SIZE
561 mutex_unlock(&ata_mtx
);
567 #ifndef MAX_PHYS_SECTOR_SIZE
568 int ata_read_sectors(IF_MD2(int drive
,)
573 #ifdef HAVE_MULTIDRIVE
574 (void)drive
; /* unused for now */
577 return ata_transfer_sectors(start
, incount
, inbuf
, false);
581 #ifndef MAX_PHYS_SECTOR_SIZE
582 int ata_write_sectors(IF_MD2(int drive
,)
587 #ifdef HAVE_MULTIDRIVE
588 (void)drive
; /* unused for now */
591 return ata_transfer_sectors(start
, count
, (void*)buf
, true);
595 #ifdef MAX_PHYS_SECTOR_SIZE
596 static int cache_sector(unsigned long sector
)
600 sector
&= ~(phys_sector_mult
- 1);
601 /* round down to physical sector boundary */
603 /* check whether the sector is already cached */
604 if (sector_cache
.inuse
&& (sector_cache
.sectornum
== sector
))
607 /* not found: read the sector */
608 sector_cache
.inuse
= false;
609 rc
= ata_transfer_sectors(sector
, phys_sector_mult
, sector_cache
.data
, false);
612 sector_cache
.sectornum
= sector
;
613 sector_cache
.inuse
= true;
618 static inline int flush_current_sector(void)
620 return ata_transfer_sectors(sector_cache
.sectornum
, phys_sector_mult
,
621 sector_cache
.data
, true);
624 int ata_read_sectors(IF_MD2(int drive
,)
632 #ifdef HAVE_MULTIDRIVE
633 (void)drive
; /* unused for now */
635 mutex_lock(&ata_mtx
);
637 offset
= start
& (phys_sector_mult
- 1);
639 if (offset
) /* first partial sector */
641 int partcount
= MIN(incount
, phys_sector_mult
- offset
);
643 rc
= cache_sector(start
);
649 memcpy(inbuf
, sector_cache
.data
+ offset
* SECTOR_SIZE
,
650 partcount
* SECTOR_SIZE
);
653 inbuf
+= partcount
* SECTOR_SIZE
;
654 incount
-= partcount
;
658 offset
= incount
& (phys_sector_mult
- 1);
663 rc
= ata_transfer_sectors(start
, incount
, inbuf
, false);
670 inbuf
+= incount
* SECTOR_SIZE
;
674 rc
= cache_sector(start
);
680 memcpy(inbuf
, sector_cache
.data
, offset
* SECTOR_SIZE
);
685 mutex_unlock(&ata_mtx
);
690 int ata_write_sectors(IF_MD2(int drive
,)
698 #ifdef HAVE_MULTIDRIVE
699 (void)drive
; /* unused for now */
701 mutex_lock(&ata_mtx
);
703 offset
= start
& (phys_sector_mult
- 1);
705 if (offset
) /* first partial sector */
707 int partcount
= MIN(count
, phys_sector_mult
- offset
);
709 rc
= cache_sector(start
);
715 memcpy(sector_cache
.data
+ offset
* SECTOR_SIZE
, buf
,
716 partcount
* SECTOR_SIZE
);
717 rc
= flush_current_sector();
724 buf
+= partcount
* SECTOR_SIZE
;
729 offset
= count
& (phys_sector_mult
- 1);
734 rc
= ata_transfer_sectors(start
, count
, (void*)buf
, true);
741 buf
+= count
* SECTOR_SIZE
;
745 rc
= cache_sector(start
);
751 memcpy(sector_cache
.data
, buf
, offset
* SECTOR_SIZE
);
752 rc
= flush_current_sector();
762 mutex_unlock(&ata_mtx
);
766 #endif /* MAX_PHYS_SECTOR_SIZE */
768 static int check_registers(void)
771 if ( ATA_STATUS
& STATUS_BSY
)
774 for (i
= 0; i
<64; i
++) {
775 SET_REG(ATA_NSECTOR
, WRITE_PATTERN1
);
776 SET_REG(ATA_SECTOR
, WRITE_PATTERN2
);
777 SET_REG(ATA_LCYL
, WRITE_PATTERN3
);
778 SET_REG(ATA_HCYL
, WRITE_PATTERN4
);
780 if (((ATA_NSECTOR
& READ_PATTERN1_MASK
) == READ_PATTERN1
) &&
781 ((ATA_SECTOR
& READ_PATTERN2_MASK
) == READ_PATTERN2
) &&
782 ((ATA_LCYL
& READ_PATTERN3_MASK
) == READ_PATTERN3
) &&
783 ((ATA_HCYL
& READ_PATTERN4_MASK
) == READ_PATTERN4
))
789 static int freeze_lock(void)
791 /* does the disk support Security Mode feature set? */
792 if (identify_info
[82] & 2)
794 SET_REG(ATA_SELECT
, ata_device
);
799 SET_REG(ATA_COMMAND
, CMD_SECURITY_FREEZE_LOCK
);
808 void ata_spindown(int seconds
)
810 sleep_timeout
= seconds
* HZ
;
813 bool ata_disk_is_active(void)
818 static int ata_perform_sleep(void)
820 mutex_lock(&ata_mtx
);
822 SET_REG(ATA_SELECT
, ata_device
);
824 if(!wait_for_rdy()) {
825 DEBUGF("ata_perform_sleep() - not RDY\n");
826 mutex_unlock(&ata_mtx
);
830 SET_REG(ATA_COMMAND
, CMD_SLEEP
);
834 DEBUGF("ata_perform_sleep() - CMD failed\n");
835 mutex_unlock(&ata_mtx
);
840 mutex_unlock(&ata_mtx
);
846 queue_post(&ata_queue
, Q_SLEEP
, 0);
849 void ata_sleepnow(void)
851 if (!spinup
&& !sleeping
&& !ata_mtx
.locked
&& initialized
)
853 call_storage_idle_notifys(false);
860 last_user_activity
= current_tick
;
863 static void ata_thread(void)
865 static long last_sleep
= 0;
866 struct queue_event ev
;
867 static long last_seen_mtx_unlock
= 0;
868 #ifdef ALLOW_USB_SPINDOWN
869 static bool usb_mode
= false;
873 queue_wait_w_tmo(&ata_queue
, &ev
, HZ
/2);
877 if (!spinup
&& !sleeping
)
881 if (!last_seen_mtx_unlock
)
882 last_seen_mtx_unlock
= current_tick
;
883 if (TIME_AFTER(current_tick
, last_seen_mtx_unlock
+(HZ
*2)))
885 #ifdef ALLOW_USB_SPINDOWN
889 call_storage_idle_notifys(false);
891 last_seen_mtx_unlock
= 0;
894 if ( sleep_timeout
&&
895 TIME_AFTER( current_tick
,
896 last_user_activity
+ sleep_timeout
) &&
897 TIME_AFTER( current_tick
,
898 last_disk_activity
+ sleep_timeout
) )
900 #ifdef ALLOW_USB_SPINDOWN
904 call_storage_idle_notifys(true);
907 last_sleep
= current_tick
;
911 #ifdef HAVE_ATA_POWER_OFF
912 if ( !spinup
&& sleeping
&& !poweroff
&&
913 TIME_AFTER( current_tick
, last_sleep
+ ATA_POWER_OFF_TIMEOUT
))
915 mutex_lock(&ata_mtx
);
916 ide_power_enable(false);
918 mutex_unlock(&ata_mtx
);
924 case SYS_USB_CONNECTED
:
925 /* Tell the USB thread that we are safe */
926 DEBUGF("ata_thread got SYS_USB_CONNECTED\n");
927 #ifdef ALLOW_USB_SPINDOWN
929 usb_acknowledge(SYS_USB_CONNECTED_ACK
);
930 /* There is no need to force ATA power on */
933 mutex_lock(&ata_mtx
);
940 perform_soft_reset();
944 mutex_unlock(&ata_mtx
);
947 /* Wait until the USB cable is extracted again */
948 usb_acknowledge(SYS_USB_CONNECTED_ACK
);
949 usb_wait_for_disconnect(&ata_queue
);
953 #ifdef ALLOW_USB_SPINDOWN
954 case SYS_USB_DISCONNECTED
:
955 /* Tell the USB thread that we are ready again */
956 DEBUGF("ata_thread got SYS_USB_DISCONNECTED\n");
957 usb_acknowledge(SYS_USB_DISCONNECTED_ACK
);
961 #endif /* USB_NONE */
964 #ifdef ALLOW_USB_SPINDOWN
968 call_storage_idle_notifys(false);
970 last_disk_activity
= current_tick
- sleep_timeout
+ (HZ
/2);
973 #ifdef ATA_DRIVER_CLOSE
981 /* Hardware reset protocol as specified in chapter 9.1, ATA spec draft v5 */
982 static int ata_hard_reset(void)
986 mutex_lock(&ata_mtx
);
991 SET_REG(ATA_SELECT
, ata_device
); /* select the right device */
992 ret
= wait_for_bsy();
994 /* Massage the return code so it is 0 on success and -1 on failure */
997 mutex_unlock(&ata_mtx
);
1002 static int perform_soft_reset(void)
1004 /* If this code is allowed to run on a Nano, the next reads from the flash will
1005 * time out, so we disable it. It shouldn't be necessary anyway, since the
1006 * ATA -> Flash interface automatically sleeps almost immediately after the
1012 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
1013 SET_REG(ATA_CONTROL
, CONTROL_nIEN
|CONTROL_SRST
);
1014 sleep(1); /* >= 5us */
1017 /* DMA requires INTRQ be enabled */
1018 SET_REG(ATA_CONTROL
, 0);
1020 SET_REG(ATA_CONTROL
, CONTROL_nIEN
);
1022 sleep(1); /* >2ms */
1024 /* This little sucker can take up to 30 seconds */
1028 ret
= wait_for_rdy();
1029 } while(!ret
&& retry_count
--);
1037 if (set_multiple_mode(multisectors
))
1046 int ata_soft_reset(void)
1050 mutex_lock(&ata_mtx
);
1052 ret
= perform_soft_reset();
1054 mutex_unlock(&ata_mtx
);
1058 static int ata_power_on(void)
1062 ide_power_enable(true);
1063 sleep(HZ
/4); /* allow voltage to build up */
1065 /* Accessing the PP IDE controller too early after powering up the disk
1066 * makes the core hang for a short time, causing an audio dropout. This
1067 * also depends on the disk; iPod Mini G2 needs at least HZ/5 to get rid
1068 * of the dropout. Since this time isn't additive (the wait_for_bsy() in
1069 * ata_hard_reset() will shortened by the same amount), it's a good idea
1070 * to do this on all HDD based targets. */
1072 if( ata_hard_reset() )
1075 rc
= set_features();
1079 if (set_multiple_mode(multisectors
))
1088 static int master_slave_detect(void)
1091 SET_REG(ATA_SELECT
, 0);
1092 if ( ATA_STATUS
& (STATUS_RDY
|STATUS_BSY
) ) {
1094 DEBUGF("Found master harddisk\n");
1098 SET_REG(ATA_SELECT
, SELECT_DEVICE1
);
1099 if ( ATA_STATUS
& (STATUS_RDY
|STATUS_BSY
) ) {
1100 ata_device
= SELECT_DEVICE1
;
1101 DEBUGF("Found slave harddisk\n");
1109 static int identify(void)
1113 SET_REG(ATA_SELECT
, ata_device
);
1115 if(!wait_for_rdy()) {
1116 DEBUGF("identify() - not RDY\n");
1119 SET_REG(ATA_COMMAND
, CMD_IDENTIFY
);
1121 if (!wait_for_start_of_transfer())
1123 DEBUGF("identify() - CMD failed\n");
1127 for (i
=0; i
<SECTOR_SIZE
/2; i
++) {
1128 /* the IDENTIFY words are already swapped, so we need to treat
1129 this info differently that normal sector data */
1130 #if defined(ROCKBOX_BIG_ENDIAN) && !defined(SWAP_WORDS)
1131 identify_info
[i
] = swap16(ATA_DATA
);
1133 identify_info
[i
] = ATA_DATA
;
1140 static int set_multiple_mode(int sectors
)
1142 SET_REG(ATA_SELECT
, ata_device
);
1144 if(!wait_for_rdy()) {
1145 DEBUGF("set_multiple_mode() - not RDY\n");
1149 SET_REG(ATA_NSECTOR
, sectors
);
1150 SET_REG(ATA_COMMAND
, CMD_SET_MULTIPLE_MODE
);
1152 if (!wait_for_rdy())
1154 DEBUGF("set_multiple_mode() - CMD failed\n");
1162 static int get_best_mode(unsigned short identword
, int max
, int modetype
)
1164 unsigned short testbit
= BIT_N(max
);
1167 if (identword
& testbit
)
1168 return max
| modetype
;
1177 static int set_features(void)
1180 unsigned char id_word
;
1181 unsigned char id_bit
;
1182 unsigned char subcommand
;
1183 unsigned char parameter
;
1185 { 83, 14, 0x03, 0 }, /* force PIO mode */
1186 { 83, 3, 0x05, 0x80 }, /* adv. power management: lowest w/o standby */
1187 { 83, 9, 0x42, 0x80 }, /* acoustic management: lowest noise */
1188 { 82, 6, 0xaa, 0 }, /* enable read look-ahead */
1190 { 0, 0, 0x03, 0 }, /* DMA mode */
1196 /* Find out the highest supported PIO mode */
1197 if(identify_info
[64] & 2)
1200 if(identify_info
[64] & 1)
1203 /* Update the table: set highest supported pio mode that we also support */
1204 features
[0].parameter
= 8 + pio_mode
;
1207 if (identify_info
[53] & (1<<2))
1208 /* Ultra DMA mode info present, find a mode */
1209 dma_mode
= get_best_mode(identify_info
[88], ATA_MAX_UDMA
, 0x40);
1212 /* No UDMA mode found, try to find a multi-word DMA mode */
1213 dma_mode
= get_best_mode(identify_info
[63], ATA_MAX_MWDMA
, 0x20);
1214 features
[4].id_word
= 63;
1217 features
[4].id_word
= 88;
1219 features
[4].id_bit
= dma_mode
& 7;
1220 features
[4].parameter
= dma_mode
;
1221 #endif /* HAVE_ATA_DMA */
1223 SET_REG(ATA_SELECT
, ata_device
);
1225 if (!wait_for_rdy()) {
1226 DEBUGF("set_features() - not RDY\n");
1230 for (i
=0; i
< (int)(sizeof(features
)/sizeof(features
[0])); i
++) {
1231 if (identify_info
[features
[i
].id_word
] & BIT_N(features
[i
].id_bit
)) {
1232 SET_REG(ATA_FEATURE
, features
[i
].subcommand
);
1233 SET_REG(ATA_NSECTOR
, features
[i
].parameter
);
1234 SET_REG(ATA_COMMAND
, CMD_SET_FEATURES
);
1236 if (!wait_for_rdy()) {
1237 DEBUGF("set_features() - CMD failed\n");
1241 if((ATA_ALT_STATUS
& STATUS_ERR
) && (i
!= 1)) {
1242 /* some CF cards don't like advanced powermanagement
1243 even if they mark it as supported - go figure... */
1244 if(ATA_ERROR
& ERROR_ABRT
) {
1251 #ifdef ATA_SET_DEVICE_FEATURES
1252 ata_set_pio_timings(pio_mode
);
1256 ata_dma_set_mode(dma_mode
);
1262 unsigned short* ata_get_identify(void)
1264 return identify_info
;
1267 static int init_and_check(bool hard_reset
)
1273 /* This should reset both master and slave, we don't yet know what's in */
1275 if (ata_hard_reset())
1279 rc
= master_slave_detect();
1283 /* symptom fix: else check_registers() below may fail */
1284 if (hard_reset
&& !wait_for_bsy())
1287 rc
= check_registers();
1299 if ( !initialized
) {
1300 mutex_init(&ata_mtx
);
1301 queue_init(&ata_queue
, true);
1304 mutex_lock(&ata_mtx
);
1306 /* must be called before ata_device_init() */
1307 coldstart
= ata_is_coldstart();
1312 #ifdef MAX_PHYS_SECTOR_SIZE
1313 memset(§or_cache
, 0, sizeof(sector_cache
));
1316 if ( !initialized
) {
1317 /* First call won't have multiple thread contention - this
1318 * may return at any point without having to unlock */
1319 mutex_unlock(&ata_mtx
);
1321 if (!ide_powered()) /* somebody has switched it off */
1323 ide_power_enable(true);
1324 sleep(HZ
/4); /* allow voltage to build up */
1328 /* DMA requires INTRQ be enabled */
1329 SET_REG(ATA_CONTROL
, 0);
1332 /* first try, hard reset at cold start only */
1333 rc
= init_and_check(coldstart
);
1336 { /* failed? -> second try, always with hard reset */
1337 DEBUGF("ata: init failed, retrying...\n");
1338 rc
= init_and_check(true);
1348 multisectors
= identify_info
[47] & 0xff;
1349 if (multisectors
== 0) /* Invalid multisector info, try with 16 */
1352 DEBUGF("ata: %d sectors per ata request\n",multisectors
);
1354 total_sectors
= identify_info
[60] | (identify_info
[61] << 16);
1357 if (identify_info
[83] & 0x0400 /* 48 bit address support */
1358 && total_sectors
== 0x0FFFFFFF) /* and disk size >= 128 GiB */
1359 { /* (needs BigLBA addressing) */
1360 if (identify_info
[102] || identify_info
[103])
1361 panicf("Unsupported disk size: >= 2^32 sectors");
1363 total_sectors
= identify_info
[100] | (identify_info
[101] << 16);
1364 lba48
= true; /* use BigLBA */
1372 rc
= set_features();
1376 #ifdef MAX_PHYS_SECTOR_SIZE
1377 /* Find out the physical sector size */
1378 if((identify_info
[106] & 0xe000) == 0x6000)
1379 phys_sector_mult
= BIT_N(identify_info
[106] & 0x000f);
1381 phys_sector_mult
= 1;
1383 DEBUGF("ata: %d logical sectors per phys sector", phys_sector_mult
);
1385 if (phys_sector_mult
> 1)
1387 /* Check if drive really needs emulation - if we can access
1388 * sector 1 then assume the drive will handle it better than
1389 * us, and ignore the large physical sectors.
1391 char throwaway
[SECTOR_SIZE
];
1392 rc
= ata_transfer_sectors(1, 1, &throwaway
, false);
1394 phys_sector_mult
= 1;
1397 if (phys_sector_mult
> (MAX_PHYS_SECTOR_SIZE
/SECTOR_SIZE
))
1398 panicf("Unsupported physical sector size: %d",
1399 phys_sector_mult
* SECTOR_SIZE
);
1402 mutex_lock(&ata_mtx
); /* Balance unlock below */
1404 last_disk_activity
= current_tick
;
1405 #ifdef ATA_DRIVER_CLOSE
1408 create_thread(ata_thread
, ata_stack
,
1409 sizeof(ata_stack
), 0, ata_thread_name
1410 IF_PRIO(, PRIORITY_USER_INTERFACE
)
1415 rc
= set_multiple_mode(multisectors
);
1419 mutex_unlock(&ata_mtx
);
1423 #ifdef ATA_DRIVER_CLOSE
1424 void ata_close(void)
1426 unsigned int thread_id
= ata_thread_id
;
1433 queue_post(&ata_queue
, Q_CLOSE
, 0);
1434 thread_wait(thread_id
);
1436 #endif /* ATA_DRIVER_CLOSE */
1438 #if (CONFIG_LED == LED_REAL)
1439 void ata_set_led_enabled(bool enabled
)
1441 ata_led_enabled
= enabled
;
1442 if (ata_led_enabled
)
1449 long ata_last_disk_activity(void)
1451 return last_disk_activity
;
1454 int ata_spinup_time(void)
1459 #ifdef STORAGE_GET_INFO
1460 void ata_get_info(IF_MD2(int drive
,)struct storage_info
*info
)
1462 unsigned short *src
,*dest
;
1463 static char vendor
[8];
1464 static char product
[16];
1465 static char revision
[4];
1466 #ifdef HAVE_MULTIDRIVE
1467 (void)drive
; /* unused for now */
1470 info
->sector_size
= SECTOR_SIZE
;
1471 info
->num_sectors
= total_sectors
;
1473 src
= (unsigned short*)&identify_info
[27];
1474 dest
= (unsigned short*)vendor
;
1476 dest
[i
] = htobe16(src
[i
]);
1477 info
->vendor
=vendor
;
1479 src
= (unsigned short*)&identify_info
[31];
1480 dest
= (unsigned short*)product
;
1482 dest
[i
] = htobe16(src
[i
]);
1483 info
->product
=product
;
1485 src
= (unsigned short*)&identify_info
[23];
1486 dest
= (unsigned short*)revision
;
1488 dest
[i
] = htobe16(src
[i
]);
1489 info
->revision
=revision
;
1494 /* Returns last DMA mode as set by set_features() */
1495 int ata_get_dma_mode(void)
1500 /* Needed to allow updating while waiting for DMA to complete */
1501 void ata_keep_active(void)
1503 last_disk_activity
= current_tick
;
1507 #ifdef CONFIG_STORAGE_MULTI
1508 int ata_num_drives(int first_drive
)
1510 /* We don't care which logical drive number(s) we have been assigned */