1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2002 by Alan Korr
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
31 #include "ata_idle_notify.h"
32 #include "ata-target.h"
34 #define SECTOR_SIZE (512)
36 #define ATA_FEATURE ATA_ERROR
38 #define ATA_STATUS ATA_COMMAND
39 #define ATA_ALT_STATUS ATA_CONTROL
41 #define SELECT_DEVICE1 0x10
42 #define SELECT_LBA 0x40
44 #define CONTROL_nIEN 0x02
45 #define CONTROL_SRST 0x04
47 #define CMD_READ_SECTORS 0x20
48 #define CMD_WRITE_SECTORS 0x30
49 #define CMD_WRITE_SECTORS_EXT 0x34
50 #define CMD_READ_MULTIPLE 0xC4
51 #define CMD_READ_MULTIPLE_EXT 0x29
52 #define CMD_WRITE_MULTIPLE 0xC5
53 #define CMD_SET_MULTIPLE_MODE 0xC6
54 #define CMD_STANDBY_IMMEDIATE 0xE0
55 #define CMD_STANDBY 0xE2
56 #define CMD_IDENTIFY 0xEC
57 #define CMD_SLEEP 0xE6
58 #define CMD_SET_FEATURES 0xEF
59 #define CMD_SECURITY_FREEZE_LOCK 0xF5
63 #define READ_TIMEOUT 5*HZ
65 #ifdef HAVE_ATA_POWER_OFF
66 #define ATA_POWER_OFF_TIMEOUT 2*HZ
69 #if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
70 /* Hack - what's the deal with 5g? */
/* NOTE(review): extraction artifact throughout this file — statements are
 * split across lines, original line numbers are embedded in the text, and
 * interior lines are missing. Code is left byte-identical; comments only.
 * The enclosing "struct ata_lock {" line (orig. 72) is missing here. */
/* owning thread; presumably NULL/unset when the lock is free — TODO confirm */
73 struct thread_entry
*thread
;
/* lock state/recursion count, polled by other cores (see ata_lock_lock) */
75 volatile unsigned char locked
;
/* per-lock corelock, present only on multi-core (COP) builds */
76 IF_COP( struct corelock cl
; )
/* Initialise an ata_lock: sets up its corelock. Other initialisation
 * statements (orig. lines 80, 82-84) are missing from this extract. */
79 static void ata_lock_init(struct ata_lock
*l
)
81 corelock_init(&l
->cl
);
/* Acquire an ata_lock. Visible logic: if the current thread already owns
 * the lock it takes a fast path (recursive acquire, body lost in extract);
 * otherwise it takes the corelock, disables priority-inheritance skipping
 * for this thread (skip_count = -1), and spins — dropping and retaking the
 * corelock each iteration — until 'locked' reaches 0.
 * NOTE(review): the code that records ownership and increments 'locked'
 * (orig. lines ~106-109) is missing here — confirm against full source. */
87 static void ata_lock_lock(struct ata_lock
*l
)
89 struct thread_entry
* const current
= thread_get_current();
91 if (current
== l
->thread
)
97 corelock_lock(&l
->cl
);
99 IF_PRIO( current
->skip_count
= -1; )
/* wait for the holder to release; release corelock while spinning so the
 * holder can make progress on the other core */
101 while (l
->locked
!= 0)
103 corelock_unlock(&l
->cl
);
105 corelock_lock(&l
->cl
);
110 corelock_unlock(&l
->cl
);
/* Release an ata_lock. Under the corelock, re-enables priority skipping
 * for the owner thread (skip_count = 0). The decrement/clear of 'locked'
 * and ownership reset (orig. lines 114-127 gaps) are missing from this
 * extract — code left byte-identical. */
113 static void ata_lock_unlock(struct ata_lock
*l
)
121 corelock_lock(&l
->cl
);
123 IF_PRIO( l
->thread
->skip_count
= 0; )
128 corelock_unlock(&l
->cl
);
131 #define mutex ata_lock
132 #define mutex_init ata_lock_init
133 #define mutex_lock ata_lock_lock
134 #define mutex_unlock ata_lock_unlock
135 #endif /* MAX_PHYS_SECTOR_SIZE */
/* Driver-wide state. ata_mtx serialises all register access; the booleans
 * below are read by ata_thread() to decide when to sleep/power off. */
137 static struct mutex ata_mtx SHAREDBSS_ATTR
;
138 int ata_device
; /* device 0 (master) or 1 (slave) */
/* duration of the last spin-up, in ticks (exported for statistics) */
140 int ata_spinup_time
= 0;
141 #if (CONFIG_LED == LED_REAL)
142 static bool ata_led_enabled
= true;
143 static bool ata_led_on
= false;
/* true while the disk is spinning up after a sleep */
145 static bool spinup
= false;
/* true once the SLEEP command has been issued */
146 static bool sleeping
= true;
147 static bool poweroff
= false;
/* idle period before the drive is put to sleep; 0 disables auto-sleep */
148 static long sleep_timeout
= 5*HZ
;
150 static bool lba48
= false; /* set for 48 bit addressing */
152 static long ata_stack
[(DEFAULT_STACK_SIZE
*3)/sizeof(long)];
153 static const char ata_thread_name
[] = "ata";
154 static struct event_queue ata_queue
;
155 static bool initialized
= false;
/* tick stamps used by ata_thread() for the sleep-timeout decisions */
157 static long last_user_activity
= -1;
158 long last_disk_activity
= -1;
160 static unsigned long total_sectors
;
161 static int multisectors
; /* number of supported multisectors */
/* raw IDENTIFY DEVICE response, 256 16-bit words */
162 static unsigned short identify_info
[SECTOR_SIZE
/2];
164 #ifdef MAX_PHYS_SECTOR_SIZE
/* One-entry cache holding a full physical sector, used to service reads
 * and writes that are not aligned to the physical sector size.
 * NOTE(review): later code references sector_cache.inuse, so the struct
 * has a bool 'inuse' field whose line was lost in extraction — confirm. */
166 struct sector_cache_entry
{
168 unsigned long sectornum
; /* logical sector */
169 unsigned char data
[MAX_PHYS_SECTOR_SIZE
];
171 /* buffer for reading and writing large physical sectors */
173 static struct sector_cache_entry sector_cache
;
/* logical sectors per physical sector; 1 = no translation needed */
174 static int phys_sector_mult
= 1;
177 static int ata_power_on(void);
178 static int perform_soft_reset(void);
179 static int set_multiple_mode(int sectors
);
180 static int set_features(void);
/* Poll the status register until BSY clears, with a 30 second deadline.
 * Returns 0 on timeout; the success return and the loop body's yield
 * (orig. lines 188-190, 192-193) are missing from this extract. */
182 STATICIRAM
int wait_for_bsy(void) ICODE_ATTR
;
183 STATICIRAM
int wait_for_bsy(void)
185 long timeout
= current_tick
+ HZ
*30;
186 while (TIME_BEFORE(current_tick
, timeout
) && (ATA_STATUS
& STATUS_BSY
)) {
/* keep the disk counted as active while we busy-wait */
187 last_disk_activity
= current_tick
;
191 if (TIME_BEFORE(current_tick
, timeout
))
194 return 0; /* timeout */
/* Wait up to 10 seconds for RDY in the alternate status register.
 * Returns 0 on timeout. NOTE(review): the original presumably first calls
 * wait_for_bsy() and declares 'timeout' (orig. lines 199-204 missing) —
 * confirm against full source. */
197 STATICIRAM
int wait_for_rdy(void) ICODE_ATTR
;
198 STATICIRAM
int wait_for_rdy(void)
205 timeout
= current_tick
+ HZ
*10;
207 while (TIME_BEFORE(current_tick
, timeout
) &&
208 !(ATA_ALT_STATUS
& STATUS_RDY
)) {
209 last_disk_activity
= current_tick
;
213 if (TIME_BEFORE(current_tick
, timeout
))
216 return 0; /* timeout */
/* True when the device signals data-request: DRQ set and BSY clear.
 * Preceding wait statements (orig. lines 221-224) are missing here. */
219 STATICIRAM
int wait_for_start_of_transfer(void) ICODE_ATTR
;
220 STATICIRAM
int wait_for_start_of_transfer(void)
225 return (ATA_ALT_STATUS
& (STATUS_BSY
|STATUS_DRQ
)) == STATUS_DRQ
;
/* True when the transfer is complete: RDY set and DRQ clear.
 * Preceding wait statements (orig. lines 230-232) are missing here. */
228 STATICIRAM
int wait_for_end_of_transfer(void) ICODE_ATTR
;
229 STATICIRAM
int wait_for_end_of_transfer(void)
233 return (ATA_ALT_STATUS
& (STATUS_RDY
|STATUS_DRQ
)) == STATUS_RDY
;
236 #if (CONFIG_LED == LED_REAL)
237 /* Conditionally block LED access for the ATA driver, so the LED can be
238 * (mis)used for other purposes */
/* Body (orig. lines 240-244) missing; on builds without a real LED the
 * function collapses to a direct led() call via the macro below. */
239 static void ata_led(bool on
)
246 #define ata_led(on) led(on)
249 #ifndef ATA_OPTIMIZED_READING
/* Generic PIO read: copy 'wordcount' 16-bit words from the ATA data
 * register into 'buf'. Takes a byte-wise path when 'buf' is odd-aligned
 * (the 16-bit reads from ATA_DATA, orig. line ~259, are missing in this
 * extract) and a word-wise path, with optional byte swap, otherwise. */
250 STATICIRAM
void copy_read_sectors(unsigned char* buf
, int wordcount
) ICODE_ATTR
;
251 STATICIRAM
void copy_read_sectors(unsigned char* buf
, int wordcount
)
253 unsigned short tmp
= 0;
255 if ( (unsigned long)buf
& 1)
256 { /* not 16-bit aligned, copy byte by byte */
257 unsigned char* bufend
= buf
+ wordcount
*2;
261 #if defined(SWAP_WORDS) || defined(ROCKBOX_LITTLE_ENDIAN)
262 *buf
++ = tmp
& 0xff; /* I assume big endian */
263 *buf
++ = tmp
>> 8; /* and don't use the SWAB16 macro */
268 } while (buf
< bufend
); /* tail loop is faster */
271 { /* 16-bit aligned, can do faster copy */
272 unsigned short* wbuf
= (unsigned short*)buf
;
273 unsigned short* wbufend
= wbuf
+ wordcount
;
277 *wbuf
= swap16(ATA_DATA
);
281 } while (++wbuf
< wbufend
); /* tail loop is faster */
284 #endif /* !ATA_OPTIMIZED_READING */
286 #ifdef MAX_PHYS_SECTOR_SIZE
287 static int _read_sectors(unsigned long start
,
/* Read 'incount' logical sectors starting at LBA 'start' into 'inbuf'.
 * Holds ata_mtx for the whole operation; powers up / soft-resets the
 * drive as needed, issues READ MULTIPLE (EXT for LBA48) and retries the
 * whole command on error until READ_TIMEOUT expires. Many interior lines
 * (declarations, retry-loop braces, error returns) are missing from this
 * extract — code left byte-identical. */
291 int ata_read_sectors(IF_MV2(int drive
,)
303 #ifndef MAX_PHYS_SECTOR_SIZE
304 #ifdef HAVE_MULTIVOLUME
305 (void)drive
; /* unused for now */
307 mutex_lock(&ata_mtx
);
/* guard against reading past the end of the disk */
310 if (start
+ incount
> total_sectors
) {
315 last_disk_activity
= current_tick
;
316 spinup_start
= current_tick
;
323 if (ata_power_on()) {
329 if (perform_soft_reset()) {
336 timeout
= current_tick
+ READ_TIMEOUT
;
338 SET_REG(ATA_SELECT
, ata_device
);
/* retry the whole read until it succeeds or the timeout expires */
348 while (TIME_BEFORE(current_tick
, timeout
)) {
350 last_disk_activity
= current_tick
;
/* LBA48 register sequence: each register is written twice,
 * high byte first ("previous" content), then low byte */
355 SET_REG(ATA_NSECTOR
, count
>> 8);
356 SET_REG(ATA_NSECTOR
, count
& 0xff);
357 SET_REG(ATA_SECTOR
, (start
>> 24) & 0xff); /* 31:24 */
358 SET_REG(ATA_SECTOR
, start
& 0xff); /* 7:0 */
359 SET_REG(ATA_LCYL
, 0); /* 39:32 */
360 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff); /* 15:8 */
361 SET_REG(ATA_HCYL
, 0); /* 47:40 */
362 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff); /* 23:16 */
363 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
364 SET_REG(ATA_COMMAND
, CMD_READ_MULTIPLE_EXT
);
/* 28-bit LBA variant: top 4 address bits go into the select register */
369 SET_REG(ATA_NSECTOR
, count
& 0xff); /* 0 means 256 sectors */
370 SET_REG(ATA_SECTOR
, start
& 0xff);
371 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff);
372 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff);
373 SET_REG(ATA_SELECT
, ((start
>> 24) & 0xf) | SELECT_LBA
| ata_device
);
374 SET_REG(ATA_COMMAND
, CMD_READ_MULTIPLE
);
377 /* wait at least 400ns between writing command and reading status */
378 __asm__
volatile ("nop");
379 __asm__
volatile ("nop");
380 __asm__
volatile ("nop");
381 __asm__
volatile ("nop");
382 __asm__
volatile ("nop");
389 if (!wait_for_start_of_transfer()) {
390 /* We have timed out waiting for RDY and/or DRQ, possibly
391 because the hard drive is shaking and has problems reading
392 the data. We have two options:
394 2) Perform a soft reset and try again.
396 We choose alternative 2.
398 perform_soft_reset();
/* first DRQ after a spin-up marks the end of spin-up: record it */
404 ata_spinup_time
= current_tick
- spinup_start
;
410 /* read the status register exactly once per loop */
413 if (count
>= multisectors
)
414 sectors
= multisectors
;
418 wordcount
= sectors
* SECTOR_SIZE
/ 2;
420 copy_read_sectors(buf
, wordcount
);
423 "Device errors encountered during READ MULTIPLE commands are
424 posted at the beginning of the block or partial block transfer,
425 but the DRQ bit is still set to one and the data transfer shall
426 take place, including transfer of corrupted data, if any."
429 if ( status
& (STATUS_BSY
| STATUS_ERR
| STATUS_DF
) ) {
430 perform_soft_reset();
435 buf
+= sectors
* SECTOR_SIZE
; /* Advance one chunk of sectors */
438 last_disk_activity
= current_tick
;
/* a failed end-of-transfer also triggers a reset + retry */
441 if(!ret
&& !wait_for_end_of_transfer()) {
442 perform_soft_reset();
451 #ifndef MAX_PHYS_SECTOR_SIZE
452 mutex_unlock(&ata_mtx
);
458 #ifndef ATA_OPTIMIZED_WRITING
/* Generic PIO write: copy 'wordcount' 16-bit words from 'buf' to the ATA
 * data register. Mirrors copy_read_sectors: byte-wise assembly of words
 * when 'buf' is odd-aligned (with endian/swap handling), word-wise with
 * optional swap16 otherwise. */
459 STATICIRAM
void copy_write_sectors(const unsigned char* buf
, int wordcount
)
461 STATICIRAM
void copy_write_sectors(const unsigned char* buf
, int wordcount
)
463 if ( (unsigned long)buf
& 1)
464 { /* not 16-bit aligned, copy byte by byte */
465 unsigned short tmp
= 0;
466 const unsigned char* bufend
= buf
+ wordcount
*2;
469 #if defined(SWAP_WORDS) || defined(ROCKBOX_LITTLE_ENDIAN)
470 tmp
= (unsigned short) *buf
++;
471 tmp
|= (unsigned short) *buf
++ << 8;
472 SET_16BITREG(ATA_DATA
, tmp
);
474 tmp
= (unsigned short) *buf
++ << 8;
475 tmp
|= (unsigned short) *buf
++;
476 SET_16BITREG(ATA_DATA
, tmp
);
478 } while (buf
< bufend
); /* tail loop is faster */
481 { /* 16-bit aligned, can do faster copy */
482 unsigned short* wbuf
= (unsigned short*)buf
;
483 unsigned short* wbufend
= wbuf
+ wordcount
;
487 SET_16BITREG(ATA_DATA
, swap16(*wbuf
));
489 SET_16BITREG(ATA_DATA
, *wbuf
);
491 } while (++wbuf
< wbufend
); /* tail loop is faster */
494 #endif /* !ATA_OPTIMIZED_WRITING */
496 #ifdef MAX_PHYS_SECTOR_SIZE
497 static int _write_sectors(unsigned long start
,
/* Write 'count' logical sectors from 'buf' starting at LBA 'start'.
 * Holds ata_mtx; unlike the read path this uses WRITE SECTORS (EXT),
 * transferring one sector per DRQ, with no retry loop. Interior lines
 * (variable declarations, error-path braces) are missing from this
 * extract — code left byte-identical. */
501 int ata_write_sectors(IF_MV2(int drive
,)
511 #ifndef MAX_PHYS_SECTOR_SIZE
512 #ifdef HAVE_MULTIVOLUME
513 (void)drive
; /* unused for now */
515 mutex_lock(&ata_mtx
);
518 if (start
+ count
> total_sectors
)
519 panicf("Writing past end of disk");
521 last_disk_activity
= current_tick
;
522 spinup_start
= current_tick
;
529 if (ata_power_on()) {
535 if (perform_soft_reset()) {
542 SET_REG(ATA_SELECT
, ata_device
);
/* LBA48 sequence, high byte of each register pair written first */
552 SET_REG(ATA_NSECTOR
, count
>> 8);
553 SET_REG(ATA_NSECTOR
, count
& 0xff);
554 SET_REG(ATA_SECTOR
, (start
>> 24) & 0xff); /* 31:24 */
555 SET_REG(ATA_SECTOR
, start
& 0xff); /* 7:0 */
556 SET_REG(ATA_LCYL
, 0); /* 39:32 */
557 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff); /* 15:8 */
558 SET_REG(ATA_HCYL
, 0); /* 47:40 */
559 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff); /* 23:16 */
560 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
561 SET_REG(ATA_COMMAND
, CMD_WRITE_SECTORS_EXT
);
566 SET_REG(ATA_NSECTOR
, count
& 0xff); /* 0 means 256 sectors */
567 SET_REG(ATA_SECTOR
, start
& 0xff);
568 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff);
569 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff);
570 SET_REG(ATA_SELECT
, ((start
>> 24) & 0xf) | SELECT_LBA
| ata_device
);
571 SET_REG(ATA_COMMAND
, CMD_WRITE_SECTORS
);
/* one sector per DRQ assertion */
574 for (i
=0; i
<count
; i
++) {
576 if (!wait_for_start_of_transfer()) {
582 ata_spinup_time
= current_tick
- spinup_start
;
588 copy_write_sectors(buf
, SECTOR_SIZE
/2);
591 /* reading the status register clears the interrupt */
596 last_disk_activity
= current_tick
;
599 if(!ret
&& !wait_for_end_of_transfer()) {
600 DEBUGF("End on transfer failed. -- jyp");
606 #ifndef MAX_PHYS_SECTOR_SIZE
607 mutex_unlock(&ata_mtx
);
613 #ifdef MAX_PHYS_SECTOR_SIZE
/* Ensure the physical sector containing logical sector 'sector' is in
 * sector_cache. Early-outs on a cache hit; otherwise invalidates the
 * cache, reads the whole physical sector, and revalidates on success.
 * Requires phys_sector_mult to be a power of two (mask below). */
614 static int cache_sector(unsigned long sector
)
618 sector
&= ~(phys_sector_mult
- 1);
619 /* round down to physical sector boundary */
621 /* check whether the sector is already cached */
622 if (sector_cache
.inuse
&& (sector_cache
.sectornum
== sector
))
625 /* not found: read the sector */
/* mark invalid first so a failed read never leaves a stale-valid cache */
626 sector_cache
.inuse
= false;
627 rc
= _read_sectors(sector
, phys_sector_mult
, sector_cache
.data
);
630 sector_cache
.sectornum
= sector
;
631 sector_cache
.inuse
= true;
/* Write the cached physical sector back to disk (argument list is cut
 * off in this extract; presumably sector_cache.data follows). */
636 static inline int flush_current_sector(void)
638 return _write_sectors(sector_cache
.sectornum
, phys_sector_mult
,
/* Large-physical-sector wrapper for reads: handles an unaligned head via
 * the sector cache, the aligned middle via _read_sectors, and an
 * unaligned tail via the cache again. Error-check branches between the
 * steps (orig. line gaps) are missing from this extract. */
642 int ata_read_sectors(IF_MV2(int drive
,)
650 #ifdef HAVE_MULTIVOLUME
651 (void)drive
; /* unused for now */
653 mutex_lock(&ata_mtx
);
/* misalignment of the first requested sector within its physical sector */
655 offset
= start
& (phys_sector_mult
- 1);
657 if (offset
) /* first partial sector */
659 int partcount
= MIN(incount
, phys_sector_mult
- offset
);
661 rc
= cache_sector(start
);
667 memcpy(inbuf
, sector_cache
.data
+ offset
* SECTOR_SIZE
,
668 partcount
* SECTOR_SIZE
);
671 inbuf
+= partcount
* SECTOR_SIZE
;
672 incount
-= partcount
;
/* remaining tail that does not fill a whole physical sector */
676 offset
= incount
& (phys_sector_mult
- 1);
681 rc
= _read_sectors(start
, incount
, inbuf
);
688 inbuf
+= incount
* SECTOR_SIZE
;
692 rc
= cache_sector(start
);
698 memcpy(inbuf
, sector_cache
.data
, offset
* SECTOR_SIZE
);
703 mutex_unlock(&ata_mtx
);
/* Large-physical-sector wrapper for writes: read-modify-write of the
 * unaligned head and tail through the sector cache (cache_sector +
 * memcpy + flush_current_sector), direct _write_sectors for the aligned
 * middle. Error-check branches between steps are missing here. */
708 int ata_write_sectors(IF_MV2(int drive
,)
716 #ifdef HAVE_MULTIVOLUME
717 (void)drive
; /* unused for now */
719 mutex_lock(&ata_mtx
);
721 offset
= start
& (phys_sector_mult
- 1);
723 if (offset
) /* first partial sector */
725 int partcount
= MIN(count
, phys_sector_mult
- offset
);
727 rc
= cache_sector(start
);
733 memcpy(sector_cache
.data
+ offset
* SECTOR_SIZE
, buf
,
734 partcount
* SECTOR_SIZE
);
735 rc
= flush_current_sector();
742 buf
+= partcount
* SECTOR_SIZE
;
747 offset
= count
& (phys_sector_mult
- 1);
752 rc
= _write_sectors(start
, count
, buf
);
759 buf
+= count
* SECTOR_SIZE
;
763 rc
= cache_sector(start
);
769 memcpy(sector_cache
.data
, buf
, offset
* SECTOR_SIZE
);
770 rc
= flush_current_sector();
780 mutex_unlock(&ata_mtx
);
784 #endif /* MAX_PHYS_SECTOR_SIZE */
/* Sanity-check the ATA task-file registers: bail if BSY is stuck, then
 * repeatedly (up to 64 times) write known patterns to four registers and
 * verify they read back through the per-target masks. Success/failure
 * returns are on lines missing from this extract. */
786 static int check_registers(void)
789 if ( ATA_STATUS
& STATUS_BSY
)
792 for (i
= 0; i
<64; i
++) {
793 SET_REG(ATA_NSECTOR
, WRITE_PATTERN1
);
794 SET_REG(ATA_SECTOR
, WRITE_PATTERN2
);
795 SET_REG(ATA_LCYL
, WRITE_PATTERN3
);
796 SET_REG(ATA_HCYL
, WRITE_PATTERN4
);
798 if (((ATA_NSECTOR
& READ_PATTERN1_MASK
) == READ_PATTERN1
) &&
799 ((ATA_SECTOR
& READ_PATTERN2_MASK
) == READ_PATTERN2
) &&
800 ((ATA_LCYL
& READ_PATTERN3_MASK
) == READ_PATTERN3
) &&
801 ((ATA_HCYL
& READ_PATTERN4_MASK
) == READ_PATTERN4
))
/* Issue SECURITY FREEZE LOCK if IDENTIFY word 82 bit 1 reports the
 * Security Mode feature set, preventing later (un)lock commands. Wait
 * and return statements are on lines missing from this extract. */
807 static int freeze_lock(void)
809 /* does the disk support Security Mode feature set? */
810 if (identify_info
[82] & 2)
812 SET_REG(ATA_SELECT
, ata_device
);
817 SET_REG(ATA_COMMAND
, CMD_SECURITY_FREEZE_LOCK
);
/* Set the idle spin-down timeout, in seconds (stored in ticks). */
826 void ata_spindown(int seconds
)
828 sleep_timeout
= seconds
* HZ
;
831 bool ata_disk_is_active(void)
/* Put the drive to sleep: under ata_mtx, select the device, wait for
 * RDY, and issue the SLEEP command. Unlocks the mutex on every visible
 * exit path; the return values and the 'sleeping = true' bookkeeping are
 * on lines missing from this extract. */
836 static int ata_perform_sleep(void)
838 mutex_lock(&ata_mtx
);
840 SET_REG(ATA_SELECT
, ata_device
);
842 if(!wait_for_rdy()) {
843 DEBUGF("ata_perform_sleep() - not RDY\n");
844 mutex_unlock(&ata_mtx
);
848 SET_REG(ATA_COMMAND
, CMD_SLEEP
);
852 DEBUGF("ata_perform_sleep() - CMD failed\n");
853 mutex_unlock(&ata_mtx
);
858 mutex_unlock(&ata_mtx
);
864 queue_post(&ata_queue
, Q_SLEEP
, 0);
/* Immediately sleep the drive if it is idle: not spinning up, not already
 * asleep, mutex free, and the driver initialised. Notifies idle callbacks
 * (non-forced); the ata_perform_sleep() call itself is on a missing line. */
867 void ata_sleepnow(void)
869 if (!spinup
&& !sleeping
&& !ata_mtx
.locked
&& initialized
)
871 call_ata_idle_notifys(false);
878 last_user_activity
= current_tick
;
/* Driver housekeeping thread. Every HZ/2 (or on a queued event) it:
 *  - fires idle callbacks ~2s after the mutex was last seen unlocked,
 *  - sleeps the drive after sleep_timeout of user AND disk inactivity,
 *  - powers the drive off ATA_POWER_OFF_TIMEOUT after sleeping
 *    (HAVE_ATA_POWER_OFF builds),
 *  - hands the bus to USB on SYS_USB_CONNECTED and reclaims it on
 *    disconnect.
 * The switch statement, Q_SLEEP handling and loop braces are on lines
 * missing from this extract — code left byte-identical. */
881 static void ata_thread(void)
883 static long last_sleep
= 0;
884 struct queue_event ev
;
885 static long last_seen_mtx_unlock
= 0;
888 queue_wait_w_tmo(&ata_queue
, &ev
, HZ
/2);
892 if (!spinup
&& !sleeping
)
896 if (!last_seen_mtx_unlock
)
897 last_seen_mtx_unlock
= current_tick
;
898 if (TIME_AFTER(current_tick
, last_seen_mtx_unlock
+(HZ
*2)))
900 call_ata_idle_notifys(false);
901 last_seen_mtx_unlock
= 0;
904 if ( sleep_timeout
&&
905 TIME_AFTER( current_tick
,
906 last_user_activity
+ sleep_timeout
) &&
907 TIME_AFTER( current_tick
,
908 last_disk_activity
+ sleep_timeout
) )
/* forced notify: the drive is about to be slept */
910 call_ata_idle_notifys(true);
912 last_sleep
= current_tick
;
916 #ifdef HAVE_ATA_POWER_OFF
917 if ( !spinup
&& sleeping
&& !poweroff
&&
918 TIME_AFTER( current_tick
, last_sleep
+ ATA_POWER_OFF_TIMEOUT
))
920 mutex_lock(&ata_mtx
);
921 ide_power_enable(false);
922 mutex_unlock(&ata_mtx
);
929 case SYS_USB_CONNECTED
:
/* lock out all other disk users while USB owns the bus */
931 mutex_lock(&ata_mtx
);
935 mutex_unlock(&ata_mtx
);
938 /* Tell the USB thread that we are safe */
939 DEBUGF("ata_thread got SYS_USB_CONNECTED\n");
940 usb_acknowledge(SYS_USB_CONNECTED_ACK
);
942 /* Wait until the USB cable is extracted again */
943 usb_wait_for_disconnect(&ata_queue
);
947 call_ata_idle_notifys(false);
948 last_disk_activity
= current_tick
- sleep_timeout
+ (HZ
/2);
954 /* Hardware reset protocol as specified in chapter 9.1, ATA spec draft v5 */
/* Assert the hardware reset line (platform code, on missing lines), then
 * reselect the device and wait for BSY to clear. */
955 int ata_hard_reset(void)
962 SET_REG(ATA_SELECT
, ata_device
); /* select the right device */
963 ret
= wait_for_bsy();
965 /* Massage the return code so it is 0 on success and -1 on failure */
/* ATA soft reset: pulse SRST in the device control register (with nIEN
 * held), then retry-wait for RDY and restore the multiple-sector mode.
 * Caller must hold ata_mtx. The Nano early-return, retry_count setup and
 * final returns are on lines missing from this extract. */
971 static int perform_soft_reset(void)
973 /* If this code is allowed to run on a Nano, the next reads from the flash will
974 * time out, so we disable it. It shouldn't be necessary anyway, since the
975 * ATA -> Flash interface automatically sleeps almost immediately after the
981 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
982 SET_REG(ATA_CONTROL
, CONTROL_nIEN
|CONTROL_SRST
);
983 sleep(1); /* >= 5us */
985 SET_REG(ATA_CONTROL
, CONTROL_nIEN
);
988 /* This little sucker can take up to 30 seconds */
992 ret
= wait_for_rdy();
993 } while(!ret
&& retry_count
--);
/* a reset clears the multiple-sector setting; re-apply it */
1001 if (set_multiple_mode(multisectors
))
/* Public soft reset: perform_soft_reset() serialised by ata_mtx. */
1010 int ata_soft_reset(void)
1014 mutex_lock(&ata_mtx
);
1016 ret
= perform_soft_reset();
1018 mutex_unlock(&ata_mtx
);
/* Power the drive back up: enable IDE power, wait HZ/50 for the rail,
 * hard-reset, then restore features and multiple-sector mode. Error
 * returns between the steps are on lines missing from this extract. */
1022 static int ata_power_on(void)
1026 ide_power_enable(true);
1027 sleep(HZ
/50); /* allow voltage to build up */
1028 if( ata_hard_reset() )
1031 rc
= set_features();
1035 if (set_multiple_mode(multisectors
))
/* Probe for the drive: select device 0 and check RDY/BSY; if absent,
 * select device 1 and check again, recording SELECT_DEVICE1 in
 * ata_device on success. The no-disk error return is on missing lines. */
1044 static int master_slave_detect(void)
1047 SET_REG(ATA_SELECT
, 0);
1048 if ( ATA_STATUS
& (STATUS_RDY
|STATUS_BSY
) ) {
1050 DEBUGF("Found master harddisk\n");
1054 SET_REG(ATA_SELECT
, SELECT_DEVICE1
);
1055 if ( ATA_STATUS
& (STATUS_RDY
|STATUS_BSY
) ) {
1056 ata_device
= SELECT_DEVICE1
;
1057 DEBUGF("Found slave harddisk\n");
/* Issue IDENTIFY DEVICE and read the 256-word response into
 * identify_info, byte-swapping on big-endian targets without hardware
 * word swap. Error returns are on lines missing from this extract. */
1065 static int identify(void)
1069 SET_REG(ATA_SELECT
, ata_device
);
1071 if(!wait_for_rdy()) {
1072 DEBUGF("identify() - not RDY\n");
1075 SET_REG(ATA_COMMAND
, CMD_IDENTIFY
);
1077 if (!wait_for_start_of_transfer())
1079 DEBUGF("identify() - CMD failed\n");
1083 for (i
=0; i
<SECTOR_SIZE
/2; i
++) {
1084 /* the IDENTIFY words are already swapped, so we need to treat
1085 this info differently that normal sector data */
1086 #if defined(ROCKBOX_BIG_ENDIAN) && !defined(SWAP_WORDS)
1087 identify_info
[i
] = swap16(ATA_DATA
);
1089 identify_info
[i
] = ATA_DATA
;
/* Program the sectors-per-interrupt count for READ/WRITE MULTIPLE via
 * SET MULTIPLE MODE. Return statements are on missing lines. */
1096 static int set_multiple_mode(int sectors
)
1098 SET_REG(ATA_SELECT
, ata_device
);
1100 if(!wait_for_rdy()) {
1101 DEBUGF("set_multiple_mode() - not RDY\n");
1105 SET_REG(ATA_NSECTOR
, sectors
);
1106 SET_REG(ATA_COMMAND
, CMD_SET_MULTIPLE_MODE
);
1108 if (!wait_for_rdy())
1110 DEBUGF("set_multiple_mode() - CMD failed\n");
/* Apply a table of SET FEATURES subcommands (PIO mode, advanced power
 * management, acoustic management, read look-ahead), each gated on the
 * corresponding IDENTIFY support bit. The struct/table declarations and
 * pio_mode default are split across missing lines. An ABRT error is
 * tolerated for the APM entry (i == 1) since some CF cards falsely
 * advertise it. */
1117 static int set_features(void)
1120 unsigned char id_word
;
1121 unsigned char id_bit
;
1122 unsigned char subcommand
;
1123 unsigned char parameter
;
1125 { 83, 14, 0x03, 0 }, /* force PIO mode */
1126 { 83, 3, 0x05, 0x80 }, /* adv. power management: lowest w/o standby */
1127 { 83, 9, 0x42, 0x80 }, /* acoustic management: lowest noise */
1128 { 82, 6, 0xaa, 0 }, /* enable read look-ahead */
1133 /* Find out the highest supported PIO mode */
1134 if(identify_info
[64] & 2)
1137 if(identify_info
[64] & 1)
1140 /* Update the table: set highest supported pio mode that we also support */
/* SET FEATURES transfer-mode encoding: 0x08 | PIO mode number */
1141 features
[0].parameter
= 8 + pio_mode
;
1143 SET_REG(ATA_SELECT
, ata_device
);
1145 if (!wait_for_rdy()) {
1146 DEBUGF("set_features() - not RDY\n");
1150 for (i
=0; i
< (int)(sizeof(features
)/sizeof(features
[0])); i
++) {
1151 if (identify_info
[features
[i
].id_word
] & (1 << features
[i
].id_bit
)) {
1152 SET_REG(ATA_FEATURE
, features
[i
].subcommand
);
1153 SET_REG(ATA_NSECTOR
, features
[i
].parameter
);
1154 SET_REG(ATA_COMMAND
, CMD_SET_FEATURES
);
1156 if (!wait_for_rdy()) {
1157 DEBUGF("set_features() - CMD failed\n");
1161 if((ATA_ALT_STATUS
& STATUS_ERR
) && (i
!= 1)) {
1162 /* some CF cards don't like advanced powermanagement
1163 even if they mark it as supported - go figure... */
1164 if(ATA_ERROR
& ERROR_ABRT
) {
/* Expose the raw IDENTIFY DEVICE data (256 words, valid after init). */
1174 unsigned short* ata_get_identify(void)
1176 return identify_info
;
/* One init attempt: optional hard reset, master/slave detection, then a
 * post-reset BSY wait and the task-file register check. Intermediate
 * error returns are on lines missing from this extract. */
1179 static int init_and_check(bool hard_reset
)
1185 /* This should reset both master and slave, we don't yet know what's in */
1187 if (ata_hard_reset())
1191 rc
= master_slave_detect();
1195 /* symptom fix: else check_registers() below may fail */
1196 if (hard_reset
&& !wait_for_bsy())
1199 rc
= check_registers();
/* Body of ata_init() — the function header line (orig. ~1206) was lost
 * in extraction. First-call path: create the mutex/queue, detect cold
 * start, init hardware, run init_and_check (hard reset on cold start,
 * retried once with a forced hard reset), then derive multisector count,
 * physical sector size, total/LBA48 capacity from IDENTIFY, apply
 * features, and spawn the ata thread. Subsequent calls only re-apply the
 * multiple-sector mode. Code left byte-identical. */
1211 if ( !initialized
) {
1212 mutex_init(&ata_mtx
);
1213 queue_init(&ata_queue
, true);
1216 mutex_lock(&ata_mtx
);
1218 /* must be called before ata_device_init() */
1219 coldstart
= ata_is_coldstart();
1224 #ifdef MAX_PHYS_SECTOR_SIZE
/* NOTE(review): "§or_cache" below is mojibake for "&sector_cache"
 * (an "&sect;" HTML entity) introduced by extraction — fix on merge */
1225 memset(§or_cache
, 0, sizeof(sector_cache
));
1228 if ( !initialized
) {
1229 /* First call won't have multiple thread contention - this
1230 * may return at any point without having to unlock */
1231 mutex_unlock(&ata_mtx
);
1233 if (!ide_powered()) /* somebody has switched it off */
1235 ide_power_enable(true);
1236 sleep(HZ
/50); /* allow voltage to build up */
1239 /* first try, hard reset at cold start only */
1240 rc
= init_and_check(coldstart
);
1243 { /* failed? -> second try, always with hard reset */
1244 DEBUGF("ata: init failed, retrying...\n");
1245 rc
= init_and_check(true);
/* IDENTIFY word 47 low byte: max sectors per READ/WRITE MULTIPLE */
1255 multisectors
= identify_info
[47] & 0xff;
1256 if (multisectors
== 0) /* Invalid multisector info, try with 16 */
1259 DEBUGF("ata: %d sectors per ata request\n",multisectors
);
1261 #ifdef MAX_PHYS_SECTOR_SIZE
1262 /* Find out the physical sector size */
1263 if((identify_info
[106] & 0xe000) == 0x6000)
1264 phys_sector_mult
= 1 << (identify_info
[106] & 0x000f);
1266 phys_sector_mult
= 1;
1268 DEBUGF("ata: %d logical sectors per phys sector", phys_sector_mult
);
1270 if (phys_sector_mult
> (MAX_PHYS_SECTOR_SIZE
/SECTOR_SIZE
))
1271 panicf("Unsupported physical sector size: %d",
1272 phys_sector_mult
* SECTOR_SIZE
);
/* 28-bit capacity from IDENTIFY words 60-61 */
1275 total_sectors
= identify_info
[60] | (identify_info
[61] << 16);
1278 if (identify_info
[83] & 0x0400 /* 48 bit address support */
1279 && total_sectors
== 0x0FFFFFFF) /* and disk size >= 128 GiB */
1280 { /* (needs BigLBA addressing) */
1281 if (identify_info
[102] || identify_info
[103])
1282 panicf("Unsupported disk size: >= 2^32 sectors");
/* 48-bit capacity from IDENTIFY words 100-101 */
1284 total_sectors
= identify_info
[100] | (identify_info
[101] << 16);
1285 lba48
= true; /* use BigLBA */
1293 rc
= set_features();
1297 mutex_lock(&ata_mtx
); /* Balance unlock below */
1299 last_disk_activity
= current_tick
;
1300 create_thread(ata_thread
, ata_stack
,
1301 sizeof(ata_stack
), 0, ata_thread_name
1302 IF_PRIO(, PRIORITY_USER_INTERFACE
)
1307 rc
= set_multiple_mode(multisectors
);
1311 mutex_unlock(&ata_mtx
);
1315 #if (CONFIG_LED == LED_REAL)
1316 void ata_set_led_enabled(bool enabled
)
1318 ata_led_enabled
= enabled
;
1319 if (ata_led_enabled
)