1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2002 by Alan Korr
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
33 #include "ata_idle_notify.h"
34 #include "ata-target.h"
/* Logical sector size (bytes) used throughout this driver. */
#define SECTOR_SIZE (512)

/* Task-file register aliases: these registers share an address and their
 * meaning depends on the direction of the access (read vs. write). */
#define ATA_FEATURE     ATA_ERROR
#define ATA_STATUS      ATA_COMMAND
#define ATA_ALT_STATUS  ATA_CONTROL

/* DEVICE/HEAD (select) register bits */
#define SELECT_DEVICE1  0x10
#define SELECT_LBA      0x40

/* DEVICE CONTROL register bits */
#define CONTROL_nIEN    0x02
#define CONTROL_SRST    0x04

/* ATA command opcodes (see ATA/ATAPI specification) */
#define CMD_READ_SECTORS           0x20
#define CMD_WRITE_SECTORS          0x30
#define CMD_WRITE_SECTORS_EXT      0x34
#define CMD_READ_MULTIPLE          0xC4
#define CMD_READ_MULTIPLE_EXT      0x29
#define CMD_WRITE_MULTIPLE         0xC5
#define CMD_SET_MULTIPLE_MODE      0xC6
#define CMD_STANDBY_IMMEDIATE      0xE0
#define CMD_STANDBY                0xE2
#define CMD_IDENTIFY               0xEC
#define CMD_SLEEP                  0xE6
#define CMD_SET_FEATURES           0xEF
#define CMD_SECURITY_FREEZE_LOCK   0xF5

/* Timeout for one read transfer, in ticks.  Parenthesized so the macro
 * expands safely inside larger expressions (e.g. "x + READ_TIMEOUT"). */
#define READ_TIMEOUT (5*HZ)
/* Idle time after sleep before cutting drive power, in ticks.
 * NOTE(review): the matching #endif is not visible in this extraction
 * (original line 70+ missing), so the conditional appears unbalanced here. */
68 #ifdef HAVE_ATA_POWER_OFF
69 #define ATA_POWER_OFF_TIMEOUT 2*HZ
/* NOTE(review): this whole section is a fragment — the embedded original
 * line numbers have gaps (74-75, 78-79, 81, 84-85, ...), so declarations,
 * braces and loop scaffolding are missing.  Annotations only; no tokens
 * were changed. */

/* Handle of the ATA worker thread, used by ata_close() below to wait for
 * the thread to exit. */
72 #ifdef ATA_DRIVER_CLOSE
73 static struct thread_entry
*ata_thread_p
= NULL
;

/* Replacement for the regular mutex on this configuration (the comment
 * below suggests a workaround for one specific target).  The struct opener
 * (presumably "struct ata_lock {", original line ~79) is missing from this
 * extraction. */
76 #if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
77 /* Hack - what's the deal with 5g? */
80 struct thread_entry
*thread
;
82 volatile unsigned char locked
;
83 IF_COP( struct corelock cl
; )

/* Initialize the lock's corelock (multi-core builds only, per IF_COP). */
86 static void ata_lock_init(struct ata_lock
*l
)
88 corelock_init(&l
->cl
);

/* Acquire the lock.  Visible logic: if the current thread already owns it
 * (l->thread), it is treated specially (recursion — body lines missing);
 * otherwise spin, dropping and re-taking the corelock while l->locked is
 * nonzero. */
94 static void ata_lock_lock(struct ata_lock
*l
)
96 struct thread_entry
* const current
= thread_get_current();
98 if (current
== l
->thread
)
104 corelock_lock(&l
->cl
);
/* NOTE(review): skip_count = -1 appears to be a scheduler hint for the
 * owning thread on priority builds — confirm against kernel sources. */
106 IF_PRIO( current
->skip_count
= -1; )
108 while (l
->locked
!= 0)
110 corelock_unlock(&l
->cl
);
112 corelock_lock(&l
->cl
);
117 corelock_unlock(&l
->cl
);

/* Release the lock under the corelock; resets the owner's skip_count on
 * priority builds.  Lines clearing l->locked/l->thread are missing here. */
120 static void ata_lock_unlock(struct ata_lock
*l
)
128 corelock_lock(&l
->cl
);
130 IF_PRIO( l
->thread
->skip_count
= 0; )
135 corelock_unlock(&l
->cl
);

/* Route the driver's mutex_* calls to the ata_lock implementation above. */
138 #define mutex ata_lock
139 #define mutex_init ata_lock_init
140 #define mutex_lock ata_lock_lock
141 #define mutex_unlock ata_lock_unlock
142 #endif /* MAX_PHYS_SECTOR_SIZE */
/* NOTE(review): fragment — embedded numbering gaps (146, 151, 156, ...)
 * indicate missing declarations.  Annotations only. */

/* Serializes all access to the ATA hardware. */
144 static struct mutex ata_mtx SHAREDBSS_ATTR
;
145 static int ata_device
; /* device 0 (master) or 1 (slave) */

/* Measured spin-up time of the last power-up, in ticks (set in the
 * read/write paths from current_tick - spinup_start). */
147 int ata_spinup_time
= 0;

/* LED state (real-LED targets only). */
148 #if (CONFIG_LED == LED_REAL)
149 static bool ata_led_enabled
= true;
150 static bool ata_led_on
= false;

/* Driver state flags and the configurable idle timeout before sleeping. */
152 static bool spinup
= false;
153 static bool sleeping
= true;
154 static bool poweroff
= false;
155 static long sleep_timeout
= 5*HZ
;
157 static bool lba48
= false; /* set for 48 bit addressing */

/* Stack, name and queue of the ATA worker thread. */
159 static long ata_stack
[(DEFAULT_STACK_SIZE
*3)/sizeof(long)];
160 static const char ata_thread_name
[] = "ata";
161 static struct event_queue ata_queue
;
162 static bool initialized
= false;

/* Activity timestamps driving the idle/sleep logic in ata_thread(). */
164 static long last_user_activity
= -1;
165 long last_disk_activity
= -1;

/* Geometry info from IDENTIFY DEVICE. */
167 static unsigned long total_sectors
;
168 static int multisectors
; /* number of supported multisectors */
169 static unsigned short identify_info
[SECTOR_SIZE
/2];

/* One-entry cache of a physical sector, for drives whose physical sector
 * is larger than the 512-byte logical sector.  NOTE(review): the struct's
 * closing brace and the "inuse" member referenced later are missing from
 * this extraction. */
171 #ifdef MAX_PHYS_SECTOR_SIZE
173 struct sector_cache_entry
{
175 unsigned long sectornum
; /* logical sector */
176 unsigned char data
[MAX_PHYS_SECTOR_SIZE
];
178 /* buffer for reading and writing large physical sectors */
180 static struct sector_cache_entry sector_cache
;
181 static int phys_sector_mult
= 1;

/* Forward declarations for helpers defined later in this file. */
184 static int ata_power_on(void);
185 static int perform_soft_reset(void);
186 static int set_multiple_mode(int sectors
);
187 static int set_features(void);
/* NOTE(review): the four status-polling helpers below are fragments — the
 * do-loop openers, braces and success returns are missing (numbering gaps).
 * Annotations only. */

/* Poll until the drive clears BSY or ~30 s elapse; returns 0 on timeout
 * (the success path is in the missing lines). */
189 STATICIRAM ICODE_ATTR
int wait_for_bsy(void)
191 long timeout
= current_tick
+ HZ
*30;
195 if (!(ATA_STATUS
& STATUS_BSY
))
/* touching the disk counts as disk activity for the idle logic */
197 last_disk_activity
= current_tick
;
199 } while (TIME_BEFORE(current_tick
, timeout
));
201 return 0; /* timeout */

/* Poll until the drive reports RDY or ~10 s elapse; returns 0 on timeout.
 * Reads ATA_ALT_STATUS, which does not clear pending interrupts. */
204 STATICIRAM ICODE_ATTR
int wait_for_rdy(void)
211 timeout
= current_tick
+ HZ
*10;
215 if (ATA_ALT_STATUS
& STATUS_RDY
)
217 last_disk_activity
= current_tick
;
219 } while (TIME_BEFORE(current_tick
, timeout
));
221 return 0; /* timeout */

/* True when the drive is ready to transfer data: DRQ set and BSY clear. */
224 STATICIRAM ICODE_ATTR
int wait_for_start_of_transfer(void)
229 return (ATA_ALT_STATUS
& (STATUS_BSY
|STATUS_DRQ
)) == STATUS_DRQ
;

/* True when a transfer has completed: RDY set and DRQ clear. */
232 STATICIRAM ICODE_ATTR
int wait_for_end_of_transfer(void)
236 return (ATA_ALT_STATUS
& (STATUS_RDY
|STATUS_DRQ
)) == STATUS_RDY
;
/* LED wrapper: on real-LED targets ata_led() is a function (body missing
 * here); otherwise it maps straight to led(). */
239 #if (CONFIG_LED == LED_REAL)
240 /* Conditionally block LED access for the ATA driver, so the LED can be
241 * (mis)used for other purposes */
242 static void ata_led(bool on
)
249 #define ata_led(on) led(on)

/* Generic PIO read: copy `wordcount` 16-bit words from the ATA data
 * register into buf.  Two paths: a byte-wise path for odd (unaligned)
 * buffers and a word-wise path for aligned ones.  NOTE(review): fragment —
 * the do-loop openers and the reads of ATA_DATA into `tmp` on the
 * unaligned path are among the missing lines. */
252 #ifndef ATA_OPTIMIZED_READING
253 STATICIRAM ICODE_ATTR
void copy_read_sectors(unsigned char* buf
, int wordcount
)
255 unsigned short tmp
= 0;
257 if ( (unsigned long)buf
& 1)
258 { /* not 16-bit aligned, copy byte by byte */
259 unsigned char* bufend
= buf
+ wordcount
*2;
/* byte order differs between word-swapped and little-endian builds */
263 #if defined(SWAP_WORDS) || defined(ROCKBOX_LITTLE_ENDIAN)
264 *buf
++ = tmp
& 0xff; /* I assume big endian */
265 *buf
++ = tmp
>> 8; /* and don't use the SWAB16 macro */
270 } while (buf
< bufend
); /* tail loop is faster */
273 { /* 16-bit aligned, can do faster copy */
274 unsigned short* wbuf
= (unsigned short*)buf
;
275 unsigned short* wbufend
= wbuf
+ wordcount
;
279 *wbuf
= swap16(ATA_DATA
);
283 } while (++wbuf
< wbufend
); /* tail loop is faster */
286 #endif /* !ATA_OPTIMIZED_READING */
/* Sector-read path.  When MAX_PHYS_SECTOR_SIZE is defined this function is
 * the internal _read_sectors() used by the caching wrappers further down;
 * otherwise it is the public ata_read_sectors().  NOTE(review): heavy
 * fragment — the signatures' remaining parameters, retry loop structure,
 * lba48 branch selection, and most braces are in the missing lines.
 * Annotations only; no tokens changed. */
288 #ifdef MAX_PHYS_SECTOR_SIZE
289 static int _read_sectors(unsigned long start
,
293 int ata_read_sectors(IF_MV2(int drive
,)
305 #ifndef MAX_PHYS_SECTOR_SIZE
306 #ifdef HAVE_MULTIVOLUME
307 (void)drive
; /* unused for now */
309 mutex_lock(&ata_mtx
);
/* guard against reading past the device capacity */
312 if (start
+ incount
> total_sectors
) {
317 last_disk_activity
= current_tick
;
318 spinup_start
= current_tick
;
/* wake the drive: power it up or soft-reset it as needed */
325 if (ata_power_on()) {
331 if (perform_soft_reset()) {
338 timeout
= current_tick
+ READ_TIMEOUT
;
340 SET_REG(ATA_SELECT
, ata_device
);
/* retry until the whole request succeeds or READ_TIMEOUT expires */
350 while (TIME_BEFORE(current_tick
, timeout
)) {
352 last_disk_activity
= current_tick
;
/* 48-bit (BigLBA) command setup: registers are written twice, high
 * byte first — comments give the LBA bit ranges */
357 SET_REG(ATA_NSECTOR
, count
>> 8);
358 SET_REG(ATA_NSECTOR
, count
& 0xff);
359 SET_REG(ATA_SECTOR
, (start
>> 24) & 0xff); /* 31:24 */
360 SET_REG(ATA_SECTOR
, start
& 0xff); /* 7:0 */
361 SET_REG(ATA_LCYL
, 0); /* 39:32 */
362 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff); /* 15:8 */
363 SET_REG(ATA_HCYL
, 0); /* 47:40 */
364 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff); /* 23:16 */
365 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
366 SET_REG(ATA_COMMAND
, CMD_READ_MULTIPLE_EXT
);
/* 28-bit LBA command setup (top 4 address bits go in the select reg) */
371 SET_REG(ATA_NSECTOR
, count
& 0xff); /* 0 means 256 sectors */
372 SET_REG(ATA_SECTOR
, start
& 0xff);
373 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff);
374 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff);
375 SET_REG(ATA_SELECT
, ((start
>> 24) & 0xf) | SELECT_LBA
| ata_device
);
376 SET_REG(ATA_COMMAND
, CMD_READ_MULTIPLE
);
379 /* wait at least 400ns between writing command and reading status */
380 __asm__
volatile ("nop");
381 __asm__
volatile ("nop");
382 __asm__
volatile ("nop");
383 __asm__
volatile ("nop");
384 __asm__
volatile ("nop");
391 if (!wait_for_start_of_transfer()) {
392 /* We have timed out waiting for RDY and/or DRQ, possibly
393 because the hard drive is shaking and has problems reading
394 the data. We have two options:
396 2) Perform a soft reset and try again.
398 We choose alternative 2.
400 perform_soft_reset();
/* first DRQ after spin-up marks the end of spin-up; record duration */
406 ata_spinup_time
= current_tick
- spinup_start
;
412 /* read the status register exactly once per loop */
/* transfer at most `multisectors` sectors per DRQ block */
415 if (count
>= multisectors
)
416 sectors
= multisectors
;
420 wordcount
= sectors
* SECTOR_SIZE
/ 2;
422 copy_read_sectors(buf
, wordcount
);
/* quoted from the ATA specification: */
425 "Device errors encountered during READ MULTIPLE commands are
426 posted at the beginning of the block or partial block transfer,
427 but the DRQ bit is still set to one and the data transfer shall
428 take place, including transfer of corrupted data, if any."
/* so: check for error bits after the transfer and reset on failure */
431 if ( status
& (STATUS_BSY
| STATUS_ERR
| STATUS_DF
) ) {
432 perform_soft_reset();
437 buf
+= sectors
* SECTOR_SIZE
; /* Advance one chunk of sectors */
440 last_disk_activity
= current_tick
;
/* the device must drop DRQ once everything was transferred */
443 if(!ret
&& !wait_for_end_of_transfer()) {
444 perform_soft_reset();
453 #ifndef MAX_PHYS_SECTOR_SIZE
454 mutex_unlock(&ata_mtx
);
/* Generic PIO write: mirror of copy_read_sectors() — push `wordcount`
 * 16-bit words from buf to the ATA data register, with a byte-wise path
 * for unaligned buffers and a word-wise path for aligned ones.
 * NOTE(review): fragment — the second parameter, do-loop openers and some
 * preprocessor branches are in the missing lines.  Annotations only. */
460 #ifndef ATA_OPTIMIZED_WRITING
461 STATICIRAM ICODE_ATTR
void copy_write_sectors(const unsigned char* buf
,
464 if ( (unsigned long)buf
& 1)
465 { /* not 16-bit aligned, copy byte by byte */
466 unsigned short tmp
= 0;
467 const unsigned char* bufend
= buf
+ wordcount
*2;
/* assemble each 16-bit word in the byte order the build requires */
470 #if defined(SWAP_WORDS) || defined(ROCKBOX_LITTLE_ENDIAN)
471 tmp
= (unsigned short) *buf
++;
472 tmp
|= (unsigned short) *buf
++ << 8;
473 SET_16BITREG(ATA_DATA
, tmp
);
475 tmp
= (unsigned short) *buf
++ << 8;
476 tmp
|= (unsigned short) *buf
++;
477 SET_16BITREG(ATA_DATA
, tmp
);
479 } while (buf
< bufend
); /* tail loop is faster */
482 { /* 16-bit aligned, can do faster copy */
483 unsigned short* wbuf
= (unsigned short*)buf
;
484 unsigned short* wbufend
= wbuf
+ wordcount
;
/* word-swapped builds byte-swap on the way out; others write directly */
488 SET_16BITREG(ATA_DATA
, swap16(*wbuf
));
490 SET_16BITREG(ATA_DATA
, *wbuf
);
492 } while (++wbuf
< wbufend
); /* tail loop is faster */
495 #endif /* !ATA_OPTIMIZED_WRITING */
/* Sector-write path, structured like the read path above: internal
 * _write_sectors() when physical-sector caching is compiled in, public
 * ata_write_sectors() otherwise.  NOTE(review): heavy fragment — remaining
 * parameters, wake-up/retry structure and most braces are missing.
 * Annotations only. */
497 #ifdef MAX_PHYS_SECTOR_SIZE
498 static int _write_sectors(unsigned long start
,
502 int ata_write_sectors(IF_MV2(int drive
,)
512 #ifndef MAX_PHYS_SECTOR_SIZE
513 #ifdef HAVE_MULTIVOLUME
514 (void)drive
; /* unused for now */
516 mutex_lock(&ata_mtx
);
/* writing past capacity is fatal here, unlike the read path */
519 if (start
+ count
> total_sectors
)
520 panicf("Writing past end of disk");
522 last_disk_activity
= current_tick
;
523 spinup_start
= current_tick
;
530 if (ata_power_on()) {
536 if (perform_soft_reset()) {
543 SET_REG(ATA_SELECT
, ata_device
);
/* 48-bit (BigLBA) register setup — high byte written first */
553 SET_REG(ATA_NSECTOR
, count
>> 8);
554 SET_REG(ATA_NSECTOR
, count
& 0xff);
555 SET_REG(ATA_SECTOR
, (start
>> 24) & 0xff); /* 31:24 */
556 SET_REG(ATA_SECTOR
, start
& 0xff); /* 7:0 */
557 SET_REG(ATA_LCYL
, 0); /* 39:32 */
558 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff); /* 15:8 */
559 SET_REG(ATA_HCYL
, 0); /* 47:40 */
560 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff); /* 23:16 */
561 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
562 SET_REG(ATA_COMMAND
, CMD_WRITE_SECTORS_EXT
);
/* 28-bit LBA setup */
567 SET_REG(ATA_NSECTOR
, count
& 0xff); /* 0 means 256 sectors */
568 SET_REG(ATA_SECTOR
, start
& 0xff);
569 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff);
570 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff);
571 SET_REG(ATA_SELECT
, ((start
>> 24) & 0xf) | SELECT_LBA
| ata_device
);
572 SET_REG(ATA_COMMAND
, CMD_WRITE_SECTORS
);
/* CMD_WRITE_SECTORS transfers one sector per DRQ, hence the per-sector
 * loop (contrast with the multiple-sector read path) */
575 for (i
=0; i
<count
; i
++) {
577 if (!wait_for_start_of_transfer()) {
583 ata_spinup_time
= current_tick
- spinup_start
;
589 copy_write_sectors(buf
, SECTOR_SIZE
/2);
592 /* reading the status register clears the interrupt */
597 last_disk_activity
= current_tick
;
600 if(!ret
&& !wait_for_end_of_transfer()) {
601 DEBUGF("End on transfer failed. -- jyp");
607 #ifndef MAX_PHYS_SECTOR_SIZE
608 mutex_unlock(&ata_mtx
);
/* Physical-sector emulation layer: drives whose physical sector spans
 * phys_sector_mult logical sectors are accessed through a one-entry cache,
 * and the public read/write entry points below split requests into an
 * unaligned head, an aligned middle, and an unaligned tail.
 * NOTE(review): fragments throughout — error-handling branches, local
 * declarations and braces are in the missing lines.  Annotations only. */
614 #ifdef MAX_PHYS_SECTOR_SIZE

/* Load the physical sector containing `sector` into sector_cache (no-op
 * if it is already cached). */
615 static int cache_sector(unsigned long sector
)
/* phys_sector_mult is a power of two, so masking rounds down */
619 sector
&= ~(phys_sector_mult
- 1);
620 /* round down to physical sector boundary */
622 /* check whether the sector is already cached */
623 if (sector_cache
.inuse
&& (sector_cache
.sectornum
== sector
))
626 /* not found: read the sector */
/* mark invalid while the read is in flight */
627 sector_cache
.inuse
= false;
628 rc
= _read_sectors(sector
, phys_sector_mult
, sector_cache
.data
);
631 sector_cache
.sectornum
= sector
;
632 sector_cache
.inuse
= true;

/* Write the cached physical sector back to disk. */
637 static inline int flush_current_sector(void)
639 return _write_sectors(sector_cache
.sectornum
, phys_sector_mult
,

/* Public read entry point (physical-sector variant). */
643 int ata_read_sectors(IF_MV2(int drive
,)
651 #ifdef HAVE_MULTIVOLUME
652 (void)drive
; /* unused for now */
654 mutex_lock(&ata_mtx
);
/* head: sectors before the first physical-sector boundary */
656 offset
= start
& (phys_sector_mult
- 1);
658 if (offset
) /* first partial sector */
660 int partcount
= MIN(incount
, phys_sector_mult
- offset
);
662 rc
= cache_sector(start
);
668 memcpy(inbuf
, sector_cache
.data
+ offset
* SECTOR_SIZE
,
669 partcount
* SECTOR_SIZE
);
672 inbuf
+= partcount
* SECTOR_SIZE
;
673 incount
-= partcount
;
/* tail length; the aligned middle is read directly, bypassing the cache */
677 offset
= incount
& (phys_sector_mult
- 1);
682 rc
= _read_sectors(start
, incount
, inbuf
);
689 inbuf
+= incount
* SECTOR_SIZE
;
/* tail: partial physical sector at the end */
693 rc
= cache_sector(start
);
699 memcpy(inbuf
, sector_cache
.data
, offset
* SECTOR_SIZE
);
704 mutex_unlock(&ata_mtx
);

/* Public write entry point (physical-sector variant); partial physical
 * sectors go through read-modify-write via the cache + flush. */
709 int ata_write_sectors(IF_MV2(int drive
,)
717 #ifdef HAVE_MULTIVOLUME
718 (void)drive
; /* unused for now */
720 mutex_lock(&ata_mtx
);
722 offset
= start
& (phys_sector_mult
- 1);
724 if (offset
) /* first partial sector */
726 int partcount
= MIN(count
, phys_sector_mult
- offset
);
728 rc
= cache_sector(start
);
734 memcpy(sector_cache
.data
+ offset
* SECTOR_SIZE
, buf
,
735 partcount
* SECTOR_SIZE
);
736 rc
= flush_current_sector();
743 buf
+= partcount
* SECTOR_SIZE
;
748 offset
= count
& (phys_sector_mult
- 1);
/* aligned middle is written directly */
753 rc
= _write_sectors(start
, count
, buf
);
760 buf
+= count
* SECTOR_SIZE
;
/* tail: read-modify-write the final partial physical sector */
764 rc
= cache_sector(start
);
770 memcpy(sector_cache
.data
, buf
, offset
* SECTOR_SIZE
);
771 rc
= flush_current_sector();
781 mutex_unlock(&ata_mtx
);
785 #endif /* MAX_PHYS_SECTOR_SIZE */
/* Sanity-check the ATA interface: write known patterns to the task-file
 * registers and read them back (64 attempts).  NOTE(review): fragment —
 * return statements and braces are in the missing lines. */
787 static int check_registers(void)
790 if ( ATA_STATUS
& STATUS_BSY
)
793 for (i
= 0; i
<64; i
++) {
794 SET_REG(ATA_NSECTOR
, WRITE_PATTERN1
);
795 SET_REG(ATA_SECTOR
, WRITE_PATTERN2
);
796 SET_REG(ATA_LCYL
, WRITE_PATTERN3
);
797 SET_REG(ATA_HCYL
, WRITE_PATTERN4
);
799 if (((ATA_NSECTOR
& READ_PATTERN1_MASK
) == READ_PATTERN1
) &&
800 ((ATA_SECTOR
& READ_PATTERN2_MASK
) == READ_PATTERN2
) &&
801 ((ATA_LCYL
& READ_PATTERN3_MASK
) == READ_PATTERN3
) &&
802 ((ATA_HCYL
& READ_PATTERN4_MASK
) == READ_PATTERN4
))

/* Issue SECURITY FREEZE LOCK if the drive advertises the Security Mode
 * feature set (IDENTIFY word 82 bit 1). */
808 static int freeze_lock(void)
810 /* does the disk support Security Mode feature set? */
811 if (identify_info
[82] & 2)
813 SET_REG(ATA_SELECT
, ata_device
);
818 SET_REG(ATA_COMMAND
, CMD_SECURITY_FREEZE_LOCK
);

/* Set the idle timeout (in seconds) after which the drive is put to
 * sleep by ata_thread(). */
827 void ata_spindown(int seconds
)
829 sleep_timeout
= seconds
* HZ
;

/* Body missing in this extraction; presumably reports !sleeping — confirm
 * against the full source. */
832 bool ata_disk_is_active(void)

/* Issue the SLEEP command to the drive under the driver mutex; unlocks
 * and bails out on any failure. */
837 static int ata_perform_sleep(void)
839 mutex_lock(&ata_mtx
);
841 SET_REG(ATA_SELECT
, ata_device
);
843 if(!wait_for_rdy()) {
844 DEBUGF("ata_perform_sleep() - not RDY\n");
845 mutex_unlock(&ata_mtx
);
849 SET_REG(ATA_COMMAND
, CMD_SLEEP
);
853 DEBUGF("ata_perform_sleep() - CMD failed\n");
854 mutex_unlock(&ata_mtx
);
859 mutex_unlock(&ata_mtx
);

/* Request a sleep via the ATA thread's queue (enclosing function header
 * is in the missing lines). */
865 queue_post(&ata_queue
, Q_SLEEP
, 0);

/* Sleep immediately if the drive is idle, awake, unlocked and the driver
 * is initialized; also notifies idle callbacks. */
868 void ata_sleepnow(void)
870 if (!spinup
&& !sleeping
&& !ata_mtx
.locked
&& initialized
)
872 call_ata_idle_notifys(false);

/* Records user activity for the idle-timeout logic (enclosing function
 * header, presumably ata_spin(), is in the missing lines). */
879 last_user_activity
= current_tick
;
/* ATA worker thread: wakes every HZ/2 ticks, fires idle notifications
 * shortly after the driver mutex has been free for 2 s, puts the drive to
 * sleep after sleep_timeout of inactivity, powers it off after a further
 * ATA_POWER_OFF_TIMEOUT (if supported), and services queue events (USB
 * connect, sleep requests, close).  NOTE(review): fragment — the main
 * loop, switch statement and several case labels are in the missing
 * lines.  Annotations only. */
882 static void ata_thread(void)
884 static long last_sleep
= 0;
885 struct queue_event ev
;
886 static long last_seen_mtx_unlock
= 0;
889 queue_wait_w_tmo(&ata_queue
, &ev
, HZ
/2);
/* only run the idle logic while the drive is spun up and not mid-spinup */
893 if (!spinup
&& !sleeping
)
897 if (!last_seen_mtx_unlock
)
898 last_seen_mtx_unlock
= current_tick
;
/* mutex has been free for 2 s -> disk idle, notify (non-sleep) */
899 if (TIME_AFTER(current_tick
, last_seen_mtx_unlock
+(HZ
*2)))
901 call_ata_idle_notifys(false);
902 last_seen_mtx_unlock
= 0;
/* both user and disk inactive past sleep_timeout -> sleep the drive */
905 if ( sleep_timeout
&&
906 TIME_AFTER( current_tick
,
907 last_user_activity
+ sleep_timeout
) &&
908 TIME_AFTER( current_tick
,
909 last_disk_activity
+ sleep_timeout
) )
911 call_ata_idle_notifys(true);
913 last_sleep
= current_tick
;
/* drive asleep long enough -> cut its power entirely */
917 #ifdef HAVE_ATA_POWER_OFF
918 if ( !spinup
&& sleeping
&& !poweroff
&&
919 TIME_AFTER( current_tick
, last_sleep
+ ATA_POWER_OFF_TIMEOUT
))
921 mutex_lock(&ata_mtx
);
922 ide_power_enable(false);
923 mutex_unlock(&ata_mtx
);
/* queue event handling (enclosing switch is in the missing lines) */
930 case SYS_USB_CONNECTED
:
932 mutex_lock(&ata_mtx
);
936 mutex_unlock(&ata_mtx
);
939 /* Tell the USB thread that we are safe */
940 DEBUGF("ata_thread got SYS_USB_CONNECTED\n");
941 usb_acknowledge(SYS_USB_CONNECTED_ACK
);
943 /* Wait until the USB cable is extracted again */
944 usb_wait_for_disconnect(&ata_queue
);
/* sleep request: notify, then back-date disk activity so the sleep
 * branch above triggers half a second from now */
948 call_ata_idle_notifys(false);
949 last_disk_activity
= current_tick
- sleep_timeout
+ (HZ
/2);
952 #ifdef ATA_DRIVER_CLOSE
960 /* Hardware reset protocol as specified in chapter 9.1, ATA spec draft v5 */
/* NOTE(review): fragment — the actual reset call and the return-code
 * massaging are in the missing lines.  Annotations only. */
961 int ata_hard_reset(void)
965 mutex_lock(&ata_mtx
);
970 SET_REG(ATA_SELECT
, ata_device
); /* select the right device */
971 ret
= wait_for_bsy();
973 /* Massage the return code so it is 0 on success and -1 on failure */
976 mutex_unlock(&ata_mtx
);

/* Software reset: pulse SRST (with interrupts disabled via nIEN) per the
 * ATA soft-reset protocol, wait for RDY with retries, then restore the
 * multiple-sector mode the drive was configured with. */
981 static int perform_soft_reset(void)
983 /* If this code is allowed to run on a Nano, the next reads from the flash will
984 * time out, so we disable it. It shouldn't be necessary anyway, since the
985 * ATA -> Flash interface automatically sleeps almost immediately after the
991 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
992 SET_REG(ATA_CONTROL
, CONTROL_nIEN
|CONTROL_SRST
);
993 sleep(1); /* >= 5us */
995 SET_REG(ATA_CONTROL
, CONTROL_nIEN
);
998 /* This little sucker can take up to 30 seconds */
1002 ret
= wait_for_rdy();
1003 } while(!ret
&& retry_count
--);
/* soft reset clears the multiple mode setting; restore it */
1011 if (set_multiple_mode(multisectors
))

/* Public wrapper: soft reset under the driver mutex. */
1020 int ata_soft_reset(void)
1024 mutex_lock(&ata_mtx
);
1026 ret
= perform_soft_reset();
1028 mutex_unlock(&ata_mtx
);

/* Power the drive back up: enable IDE power, wait for the supply to
 * settle, hard-reset, then re-apply features and multiple mode. */
1032 static int ata_power_on(void)
1036 ide_power_enable(true);
1037 sleep(HZ
/4); /* allow voltage to build up */
1039 /* Accessing the PP IDE controller too early after powering up the disk
1040 * makes the core hang for a short time, causing an audio dropout. This
1041 * also depends on the disk; iPod Mini G2 needs at least HZ/5 to get rid
1042 * of the dropout. Since this time isn't additive (the wait_for_bsy() in
1043 * ata_hard_reset() will shortened by the same amount), it's a good idea
1044 * to do this on all HDD based targets. */
1046 if( ata_hard_reset() )
1049 rc
= set_features();
1053 if (set_multiple_mode(multisectors
))
/* Probe for a drive: select device 0 and check RDY/BSY; if absent, try
 * device 1 and record it in ata_device.  NOTE(review): fragment — returns
 * and the failure branch are in the missing lines. */
1062 static int master_slave_detect(void)
1065 SET_REG(ATA_SELECT
, 0);
1066 if ( ATA_STATUS
& (STATUS_RDY
|STATUS_BSY
) ) {
1068 DEBUGF("Found master harddisk\n");
1072 SET_REG(ATA_SELECT
, SELECT_DEVICE1
);
1073 if ( ATA_STATUS
& (STATUS_RDY
|STATUS_BSY
) ) {
1074 ata_device
= SELECT_DEVICE1
;
1075 DEBUGF("Found slave harddisk\n");

/* Issue IDENTIFY DEVICE and fill identify_info[] with the 256 response
 * words. */
1083 static int identify(void)
1087 SET_REG(ATA_SELECT
, ata_device
);
1089 if(!wait_for_rdy()) {
1090 DEBUGF("identify() - not RDY\n");
1093 SET_REG(ATA_COMMAND
, CMD_IDENTIFY
);
1095 if (!wait_for_start_of_transfer())
1097 DEBUGF("identify() - CMD failed\n");
1101 for (i
=0; i
<SECTOR_SIZE
/2; i
++) {
1102 /* the IDENTIFY words are already swapped, so we need to treat
1103 this info differently that normal sector data */
1104 #if defined(ROCKBOX_BIG_ENDIAN) && !defined(SWAP_WORDS)
1105 identify_info
[i
] = swap16(ATA_DATA
);
1107 identify_info
[i
] = ATA_DATA
;

/* Configure how many sectors the drive transfers per READ/WRITE MULTIPLE
 * interrupt (SET MULTIPLE MODE). */
1114 static int set_multiple_mode(int sectors
)
1116 SET_REG(ATA_SELECT
, ata_device
);
1118 if(!wait_for_rdy()) {
1119 DEBUGF("set_multiple_mode() - not RDY\n");
1123 SET_REG(ATA_NSECTOR
, sectors
);
1124 SET_REG(ATA_COMMAND
, CMD_SET_MULTIPLE_MODE
);
1126 if (!wait_for_rdy())
1128 DEBUGF("set_multiple_mode() - CMD failed\n");
/* Apply a table of SET FEATURES subcommands, each gated on the drive
 * advertising support in the corresponding IDENTIFY word/bit: PIO mode,
 * advanced power management, acoustic management and read look-ahead.
 * NOTE(review): fragment — the struct/array declaration around the table
 * initializers, the pio_mode variable and several braces are in the
 * missing lines.  Annotations only. */
1135 static int set_features(void)
/* fields of the (missing) feature-table element declaration */
1138 unsigned char id_word
;
1139 unsigned char id_bit
;
1140 unsigned char subcommand
;
1141 unsigned char parameter
;
/* table entries: { identify word, support bit, subcommand, parameter } */
1143 { 83, 14, 0x03, 0 }, /* force PIO mode */
1144 { 83, 3, 0x05, 0x80 }, /* adv. power management: lowest w/o standby */
1145 { 83, 9, 0x42, 0x80 }, /* acoustic management: lowest noise */
1146 { 82, 6, 0xaa, 0 }, /* enable read look-ahead */
1151 /* Find out the highest supported PIO mode */
1152 if(identify_info
[64] & 2)
1155 if(identify_info
[64] & 1)
1158 /* Update the table: set highest supported pio mode that we also support */
/* parameter 8+n selects PIO flow-control mode n for subcommand 0x03 */
1159 features
[0].parameter
= 8 + pio_mode
;
1161 SET_REG(ATA_SELECT
, ata_device
);
1163 if (!wait_for_rdy()) {
1164 DEBUGF("set_features() - not RDY\n");
/* issue each supported feature in turn */
1168 for (i
=0; i
< (int)(sizeof(features
)/sizeof(features
[0])); i
++) {
1169 if (identify_info
[features
[i
].id_word
] & (1 << features
[i
].id_bit
)) {
1170 SET_REG(ATA_FEATURE
, features
[i
].subcommand
);
1171 SET_REG(ATA_NSECTOR
, features
[i
].parameter
);
1172 SET_REG(ATA_COMMAND
, CMD_SET_FEATURES
);
1174 if (!wait_for_rdy()) {
1175 DEBUGF("set_features() - CMD failed\n");
/* tolerate an ABRT error for entry 1 (power management) only */
1179 if((ATA_ALT_STATUS
& STATUS_ERR
) && (i
!= 1)) {
1180 /* some CF cards don't like advanced powermanagement
1181 even if they mark it as supported - go figure... */
1182 if(ATA_ERROR
& ERROR_ABRT
) {
/* let the target-specific code match the controller to the PIO mode */
1189 #ifdef ATA_SET_DEVICE_FEATURES
1190 ata_set_pio_timings(pio_mode
);
/* Expose the raw IDENTIFY DEVICE data to callers. */
1196 unsigned short* ata_get_identify(void)
1198 return identify_info
;

/* One init attempt: hard reset (optional), detect master/slave, verify the
 * register file.  NOTE(review): fragment — returns and braces missing. */
1201 static int init_and_check(bool hard_reset
)
1207 /* This should reset both master and slave, we don't yet know what's in */
1209 if (ata_hard_reset())
1213 rc
= master_slave_detect();
1217 /* symptom fix: else check_registers() below may fail */
1218 if (hard_reset
&& !wait_for_bsy())
1221 rc
= check_registers();

/* Driver init (enclosing ata_init() header is in the missing lines):
 * set up mutex/queue on first call, power up and probe the drive with a
 * hard-reset retry, read IDENTIFY-derived geometry, enable BigLBA for
 * large disks, apply features, and start the worker thread. */
1233 if ( !initialized
) {
1234 mutex_init(&ata_mtx
);
1235 queue_init(&ata_queue
, true);
1238 mutex_lock(&ata_mtx
);
1240 /* must be called before ata_device_init() */
1241 coldstart
= ata_is_coldstart();
1246 #ifdef MAX_PHYS_SECTOR_SIZE
/* NOTE(review): "§or_cache" is a mis-decoded "&sector_cache"
 * (HTML entity corruption in this extraction) — restore when the
 * file is repaired. */
1247 memset(§or_cache
, 0, sizeof(sector_cache
));
1250 if ( !initialized
) {
1251 /* First call won't have multiple thread contention - this
1252 * may return at any point without having to unlock */
1253 mutex_unlock(&ata_mtx
);
1255 if (!ide_powered()) /* somebody has switched it off */
1257 ide_power_enable(true);
1258 sleep(HZ
/4); /* allow voltage to build up */
1261 /* first try, hard reset at cold start only */
1262 rc
= init_and_check(coldstart
);
1265 { /* failed? -> second try, always with hard reset */
1266 DEBUGF("ata: init failed, retrying...\n");
1267 rc
= init_and_check(true);
/* IDENTIFY word 47 low byte: max sectors per MULTIPLE transfer */
1277 multisectors
= identify_info
[47] & 0xff;
1278 if (multisectors
== 0) /* Invalid multisector info, try with 16 */
1281 DEBUGF("ata: %d sectors per ata request\n",multisectors
);
1283 #ifdef MAX_PHYS_SECTOR_SIZE
1284 /* Find out the physical sector size */
/* IDENTIFY word 106: bits 15:13 == 011 -> valid multi-logical info,
 * low nibble is log2(logical sectors per physical sector) */
1285 if((identify_info
[106] & 0xe000) == 0x6000)
1286 phys_sector_mult
= 1 << (identify_info
[106] & 0x000f);
1288 phys_sector_mult
= 1;
1290 DEBUGF("ata: %d logical sectors per phys sector", phys_sector_mult
);
1292 if (phys_sector_mult
> (MAX_PHYS_SECTOR_SIZE
/SECTOR_SIZE
))
1293 panicf("Unsupported physical sector size: %d",
1294 phys_sector_mult
* SECTOR_SIZE
);
/* 28-bit capacity from words 60/61 */
1297 total_sectors
= identify_info
[60] | (identify_info
[61] << 16);
1300 if (identify_info
[83] & 0x0400 /* 48 bit address support */
1301 && total_sectors
== 0x0FFFFFFF) /* and disk size >= 128 GiB */
1302 { /* (needs BigLBA addressing) */
1303 if (identify_info
[102] || identify_info
[103])
1304 panicf("Unsupported disk size: >= 2^32 sectors");
/* 48-bit capacity from words 100/101 (102/103 checked as zero above) */
1306 total_sectors
= identify_info
[100] | (identify_info
[101] << 16);
1307 lba48
= true; /* use BigLBA */
1315 rc
= set_features();
1319 mutex_lock(&ata_mtx
); /* Balance unlock below */
1321 last_disk_activity
= current_tick
;
1322 #ifdef ATA_DRIVER_CLOSE
1325 create_thread(ata_thread
, ata_stack
,
1326 sizeof(ata_stack
), 0, ata_thread_name
1327 IF_PRIO(, PRIORITY_USER_INTERFACE
)
1332 rc
= set_multiple_mode(multisectors
);
1336 mutex_unlock(&ata_mtx
);

/* Shut down the worker thread: clear the handle, post Q_CLOSE and wait
 * for the thread to exit. */
1340 #ifdef ATA_DRIVER_CLOSE
1341 void ata_close(void)
1343 struct thread_entry
*thread
= ata_thread_p
;
1348 ata_thread_p
= NULL
;
1350 queue_post(&ata_queue
, Q_CLOSE
, 0);
1351 thread_wait(thread
);
1353 #endif /* ATA_DRIVER_CLOSE */

/* Allow or forbid the driver's use of the activity LED (real-LED targets).
 * NOTE(review): the body continues past this extraction's last line. */
1355 #if (CONFIG_LED == LED_REAL)
1356 void ata_set_led_enabled(bool enabled
)
1358 ata_led_enabled
= enabled
;
1359 if (ata_led_enabled
)