1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2002 by Alan Korr
12 * All files in this archive are subject to the GNU General Public License.
13 * See the file COPYING in the source tree root for full license agreement.
15 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
16 * KIND, either express or implied.
18 ****************************************************************************/
31 #include "ata_idle_notify.h"
32 #include "ata-target.h"
/* Logical sector size in bytes (ATA standard). */
#define SECTOR_SIZE     (512)

/* Register aliases: the same task-file address reads one register and
 * writes another, so give each direction its own name. */
#define ATA_FEATURE     ATA_ERROR

#define ATA_STATUS      ATA_COMMAND
#define ATA_ALT_STATUS  ATA_CONTROL

/* Device/Head register bits */
#define SELECT_DEVICE1  0x10
#define SELECT_LBA      0x40

/* Device Control register bits */
#define CONTROL_nIEN    0x02
#define CONTROL_SRST    0x04

/* ATA command opcodes */
#define CMD_READ_SECTORS           0x20
#define CMD_WRITE_SECTORS          0x30
#define CMD_WRITE_SECTORS_EXT      0x34
#define CMD_READ_MULTIPLE          0xC4
#define CMD_READ_MULTIPLE_EXT      0x29
#define CMD_WRITE_MULTIPLE         0xC5
#define CMD_SET_MULTIPLE_MODE      0xC6
#define CMD_STANDBY_IMMEDIATE      0xE0
#define CMD_STANDBY                0xE2
#define CMD_IDENTIFY               0xEC
#define CMD_SLEEP                  0xE6
#define CMD_SET_FEATURES           0xEF
#define CMD_SECURITY_FREEZE_LOCK   0xF5

/* Parenthesized so the expansion is safe inside larger expressions
 * (e.g. "2 * READ_TIMEOUT" would otherwise parse as (2*5)*HZ by luck
 * only; a trailing division or shift would silently misgroup). */
#define READ_TIMEOUT (5*HZ)

#ifdef HAVE_ATA_POWER_OFF
#define ATA_POWER_OFF_TIMEOUT (2*HZ)
#endif
70 #ifdef ATA_DRIVER_CLOSE
71 static struct thread_entry
*ata_thread_p
= NULL
;
74 #if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
75 /* Hack - what's the deal with 5g? */
78 struct thread_entry
*thread
;
80 volatile unsigned char locked
;
81 IF_COP( struct corelock cl
; )
84 static void ata_lock_init(struct ata_lock
*l
)
86 corelock_init(&l
->cl
);
92 static void ata_lock_lock(struct ata_lock
*l
)
94 struct thread_entry
* const current
= thread_get_current();
96 if (current
== l
->thread
)
102 corelock_lock(&l
->cl
);
104 IF_PRIO( current
->skip_count
= -1; )
106 while (l
->locked
!= 0)
108 corelock_unlock(&l
->cl
);
110 corelock_lock(&l
->cl
);
115 corelock_unlock(&l
->cl
);
118 static void ata_lock_unlock(struct ata_lock
*l
)
126 corelock_lock(&l
->cl
);
128 IF_PRIO( l
->thread
->skip_count
= 0; )
133 corelock_unlock(&l
->cl
);
136 #define mutex ata_lock
137 #define mutex_init ata_lock_init
138 #define mutex_lock ata_lock_lock
139 #define mutex_unlock ata_lock_unlock
140 #endif /* MAX_PHYS_SECTOR_SIZE */
142 static struct mutex ata_mtx SHAREDBSS_ATTR
;
143 int ata_device
; /* device 0 (master) or 1 (slave) */
145 int ata_spinup_time
= 0;
146 #if (CONFIG_LED == LED_REAL)
147 static bool ata_led_enabled
= true;
148 static bool ata_led_on
= false;
150 static bool spinup
= false;
151 static bool sleeping
= true;
152 static bool poweroff
= false;
153 static long sleep_timeout
= 5*HZ
;
155 static bool lba48
= false; /* set for 48 bit addressing */
157 static long ata_stack
[(DEFAULT_STACK_SIZE
*3)/sizeof(long)];
158 static const char ata_thread_name
[] = "ata";
159 static struct event_queue ata_queue
;
160 static bool initialized
= false;
162 static long last_user_activity
= -1;
163 long last_disk_activity
= -1;
165 static unsigned long total_sectors
;
166 static int multisectors
; /* number of supported multisectors */
167 static unsigned short identify_info
[SECTOR_SIZE
/2];
169 #ifdef MAX_PHYS_SECTOR_SIZE
171 struct sector_cache_entry
{
173 unsigned long sectornum
; /* logical sector */
174 unsigned char data
[MAX_PHYS_SECTOR_SIZE
];
176 /* buffer for reading and writing large physical sectors */
178 static struct sector_cache_entry sector_cache
;
179 static int phys_sector_mult
= 1;
182 static int ata_power_on(void);
183 static int perform_soft_reset(void);
184 static int set_multiple_mode(int sectors
);
185 static int set_features(void);
187 STATICIRAM ICODE_ATTR
int wait_for_bsy(void)
189 long timeout
= current_tick
+ HZ
*30;
193 if (!(ATA_STATUS
& STATUS_BSY
))
195 last_disk_activity
= current_tick
;
197 } while (TIME_BEFORE(current_tick
, timeout
));
199 return 0; /* timeout */
202 STATICIRAM ICODE_ATTR
int wait_for_rdy(void)
209 timeout
= current_tick
+ HZ
*10;
213 if (ATA_ALT_STATUS
& STATUS_RDY
)
215 last_disk_activity
= current_tick
;
217 } while (TIME_BEFORE(current_tick
, timeout
));
219 return 0; /* timeout */
222 STATICIRAM ICODE_ATTR
int wait_for_start_of_transfer(void)
227 return (ATA_ALT_STATUS
& (STATUS_BSY
|STATUS_DRQ
)) == STATUS_DRQ
;
230 STATICIRAM ICODE_ATTR
int wait_for_end_of_transfer(void)
234 return (ATA_ALT_STATUS
& (STATUS_RDY
|STATUS_DRQ
)) == STATUS_RDY
;
237 #if (CONFIG_LED == LED_REAL)
238 /* Conditionally block LED access for the ATA driver, so the LED can be
239 * (mis)used for other purposes */
240 static void ata_led(bool on
)
247 #define ata_led(on) led(on)
250 #ifndef ATA_OPTIMIZED_READING
251 STATICIRAM ICODE_ATTR
void copy_read_sectors(unsigned char* buf
, int wordcount
)
253 unsigned short tmp
= 0;
255 if ( (unsigned long)buf
& 1)
256 { /* not 16-bit aligned, copy byte by byte */
257 unsigned char* bufend
= buf
+ wordcount
*2;
261 #if defined(SWAP_WORDS) || defined(ROCKBOX_LITTLE_ENDIAN)
262 *buf
++ = tmp
& 0xff; /* I assume big endian */
263 *buf
++ = tmp
>> 8; /* and don't use the SWAB16 macro */
268 } while (buf
< bufend
); /* tail loop is faster */
271 { /* 16-bit aligned, can do faster copy */
272 unsigned short* wbuf
= (unsigned short*)buf
;
273 unsigned short* wbufend
= wbuf
+ wordcount
;
277 *wbuf
= swap16(ATA_DATA
);
281 } while (++wbuf
< wbufend
); /* tail loop is faster */
284 #endif /* !ATA_OPTIMIZED_READING */
286 #ifdef MAX_PHYS_SECTOR_SIZE
287 static int _read_sectors(unsigned long start
,
291 int ata_read_sectors(IF_MV2(int drive
,)
303 #ifndef MAX_PHYS_SECTOR_SIZE
304 #ifdef HAVE_MULTIVOLUME
305 (void)drive
; /* unused for now */
307 mutex_lock(&ata_mtx
);
310 if (start
+ incount
> total_sectors
) {
315 last_disk_activity
= current_tick
;
316 spinup_start
= current_tick
;
323 if (ata_power_on()) {
329 if (perform_soft_reset()) {
336 timeout
= current_tick
+ READ_TIMEOUT
;
338 SET_REG(ATA_SELECT
, ata_device
);
348 while (TIME_BEFORE(current_tick
, timeout
)) {
350 last_disk_activity
= current_tick
;
355 SET_REG(ATA_NSECTOR
, count
>> 8);
356 SET_REG(ATA_NSECTOR
, count
& 0xff);
357 SET_REG(ATA_SECTOR
, (start
>> 24) & 0xff); /* 31:24 */
358 SET_REG(ATA_SECTOR
, start
& 0xff); /* 7:0 */
359 SET_REG(ATA_LCYL
, 0); /* 39:32 */
360 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff); /* 15:8 */
361 SET_REG(ATA_HCYL
, 0); /* 47:40 */
362 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff); /* 23:16 */
363 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
364 SET_REG(ATA_COMMAND
, CMD_READ_MULTIPLE_EXT
);
369 SET_REG(ATA_NSECTOR
, count
& 0xff); /* 0 means 256 sectors */
370 SET_REG(ATA_SECTOR
, start
& 0xff);
371 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff);
372 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff);
373 SET_REG(ATA_SELECT
, ((start
>> 24) & 0xf) | SELECT_LBA
| ata_device
);
374 SET_REG(ATA_COMMAND
, CMD_READ_MULTIPLE
);
377 /* wait at least 400ns between writing command and reading status */
378 __asm__
volatile ("nop");
379 __asm__
volatile ("nop");
380 __asm__
volatile ("nop");
381 __asm__
volatile ("nop");
382 __asm__
volatile ("nop");
389 if (!wait_for_start_of_transfer()) {
390 /* We have timed out waiting for RDY and/or DRQ, possibly
391 because the hard drive is shaking and has problems reading
392 the data. We have two options:
394 2) Perform a soft reset and try again.
396 We choose alternative 2.
398 perform_soft_reset();
404 ata_spinup_time
= current_tick
- spinup_start
;
410 /* read the status register exactly once per loop */
413 if (count
>= multisectors
)
414 sectors
= multisectors
;
418 wordcount
= sectors
* SECTOR_SIZE
/ 2;
420 copy_read_sectors(buf
, wordcount
);
423 "Device errors encountered during READ MULTIPLE commands are
424 posted at the beginning of the block or partial block transfer,
425 but the DRQ bit is still set to one and the data transfer shall
426 take place, including transfer of corrupted data, if any."
429 if ( status
& (STATUS_BSY
| STATUS_ERR
| STATUS_DF
) ) {
430 perform_soft_reset();
435 buf
+= sectors
* SECTOR_SIZE
; /* Advance one chunk of sectors */
438 last_disk_activity
= current_tick
;
441 if(!ret
&& !wait_for_end_of_transfer()) {
442 perform_soft_reset();
451 #ifndef MAX_PHYS_SECTOR_SIZE
452 mutex_unlock(&ata_mtx
);
458 #ifndef ATA_OPTIMIZED_WRITING
459 STATICIRAM ICODE_ATTR
void copy_write_sectors(const unsigned char* buf
,
462 if ( (unsigned long)buf
& 1)
463 { /* not 16-bit aligned, copy byte by byte */
464 unsigned short tmp
= 0;
465 const unsigned char* bufend
= buf
+ wordcount
*2;
468 #if defined(SWAP_WORDS) || defined(ROCKBOX_LITTLE_ENDIAN)
469 tmp
= (unsigned short) *buf
++;
470 tmp
|= (unsigned short) *buf
++ << 8;
471 SET_16BITREG(ATA_DATA
, tmp
);
473 tmp
= (unsigned short) *buf
++ << 8;
474 tmp
|= (unsigned short) *buf
++;
475 SET_16BITREG(ATA_DATA
, tmp
);
477 } while (buf
< bufend
); /* tail loop is faster */
480 { /* 16-bit aligned, can do faster copy */
481 unsigned short* wbuf
= (unsigned short*)buf
;
482 unsigned short* wbufend
= wbuf
+ wordcount
;
486 SET_16BITREG(ATA_DATA
, swap16(*wbuf
));
488 SET_16BITREG(ATA_DATA
, *wbuf
);
490 } while (++wbuf
< wbufend
); /* tail loop is faster */
493 #endif /* !ATA_OPTIMIZED_WRITING */
495 #ifdef MAX_PHYS_SECTOR_SIZE
496 static int _write_sectors(unsigned long start
,
500 int ata_write_sectors(IF_MV2(int drive
,)
510 #ifndef MAX_PHYS_SECTOR_SIZE
511 #ifdef HAVE_MULTIVOLUME
512 (void)drive
; /* unused for now */
514 mutex_lock(&ata_mtx
);
517 if (start
+ count
> total_sectors
)
518 panicf("Writing past end of disk");
520 last_disk_activity
= current_tick
;
521 spinup_start
= current_tick
;
528 if (ata_power_on()) {
534 if (perform_soft_reset()) {
541 SET_REG(ATA_SELECT
, ata_device
);
551 SET_REG(ATA_NSECTOR
, count
>> 8);
552 SET_REG(ATA_NSECTOR
, count
& 0xff);
553 SET_REG(ATA_SECTOR
, (start
>> 24) & 0xff); /* 31:24 */
554 SET_REG(ATA_SECTOR
, start
& 0xff); /* 7:0 */
555 SET_REG(ATA_LCYL
, 0); /* 39:32 */
556 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff); /* 15:8 */
557 SET_REG(ATA_HCYL
, 0); /* 47:40 */
558 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff); /* 23:16 */
559 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
560 SET_REG(ATA_COMMAND
, CMD_WRITE_SECTORS_EXT
);
565 SET_REG(ATA_NSECTOR
, count
& 0xff); /* 0 means 256 sectors */
566 SET_REG(ATA_SECTOR
, start
& 0xff);
567 SET_REG(ATA_LCYL
, (start
>> 8) & 0xff);
568 SET_REG(ATA_HCYL
, (start
>> 16) & 0xff);
569 SET_REG(ATA_SELECT
, ((start
>> 24) & 0xf) | SELECT_LBA
| ata_device
);
570 SET_REG(ATA_COMMAND
, CMD_WRITE_SECTORS
);
573 for (i
=0; i
<count
; i
++) {
575 if (!wait_for_start_of_transfer()) {
581 ata_spinup_time
= current_tick
- spinup_start
;
587 copy_write_sectors(buf
, SECTOR_SIZE
/2);
590 /* reading the status register clears the interrupt */
595 last_disk_activity
= current_tick
;
598 if(!ret
&& !wait_for_end_of_transfer()) {
599 DEBUGF("End on transfer failed. -- jyp");
605 #ifndef MAX_PHYS_SECTOR_SIZE
606 mutex_unlock(&ata_mtx
);
612 #ifdef MAX_PHYS_SECTOR_SIZE
613 static int cache_sector(unsigned long sector
)
617 sector
&= ~(phys_sector_mult
- 1);
618 /* round down to physical sector boundary */
620 /* check whether the sector is already cached */
621 if (sector_cache
.inuse
&& (sector_cache
.sectornum
== sector
))
624 /* not found: read the sector */
625 sector_cache
.inuse
= false;
626 rc
= _read_sectors(sector
, phys_sector_mult
, sector_cache
.data
);
629 sector_cache
.sectornum
= sector
;
630 sector_cache
.inuse
= true;
635 static inline int flush_current_sector(void)
637 return _write_sectors(sector_cache
.sectornum
, phys_sector_mult
,
641 int ata_read_sectors(IF_MV2(int drive
,)
649 #ifdef HAVE_MULTIVOLUME
650 (void)drive
; /* unused for now */
652 mutex_lock(&ata_mtx
);
654 offset
= start
& (phys_sector_mult
- 1);
656 if (offset
) /* first partial sector */
658 int partcount
= MIN(incount
, phys_sector_mult
- offset
);
660 rc
= cache_sector(start
);
666 memcpy(inbuf
, sector_cache
.data
+ offset
* SECTOR_SIZE
,
667 partcount
* SECTOR_SIZE
);
670 inbuf
+= partcount
* SECTOR_SIZE
;
671 incount
-= partcount
;
675 offset
= incount
& (phys_sector_mult
- 1);
680 rc
= _read_sectors(start
, incount
, inbuf
);
687 inbuf
+= incount
* SECTOR_SIZE
;
691 rc
= cache_sector(start
);
697 memcpy(inbuf
, sector_cache
.data
, offset
* SECTOR_SIZE
);
702 mutex_unlock(&ata_mtx
);
707 int ata_write_sectors(IF_MV2(int drive
,)
715 #ifdef HAVE_MULTIVOLUME
716 (void)drive
; /* unused for now */
718 mutex_lock(&ata_mtx
);
720 offset
= start
& (phys_sector_mult
- 1);
722 if (offset
) /* first partial sector */
724 int partcount
= MIN(count
, phys_sector_mult
- offset
);
726 rc
= cache_sector(start
);
732 memcpy(sector_cache
.data
+ offset
* SECTOR_SIZE
, buf
,
733 partcount
* SECTOR_SIZE
);
734 rc
= flush_current_sector();
741 buf
+= partcount
* SECTOR_SIZE
;
746 offset
= count
& (phys_sector_mult
- 1);
751 rc
= _write_sectors(start
, count
, buf
);
758 buf
+= count
* SECTOR_SIZE
;
762 rc
= cache_sector(start
);
768 memcpy(sector_cache
.data
, buf
, offset
* SECTOR_SIZE
);
769 rc
= flush_current_sector();
779 mutex_unlock(&ata_mtx
);
783 #endif /* MAX_PHYS_SECTOR_SIZE */
785 static int check_registers(void)
788 if ( ATA_STATUS
& STATUS_BSY
)
791 for (i
= 0; i
<64; i
++) {
792 SET_REG(ATA_NSECTOR
, WRITE_PATTERN1
);
793 SET_REG(ATA_SECTOR
, WRITE_PATTERN2
);
794 SET_REG(ATA_LCYL
, WRITE_PATTERN3
);
795 SET_REG(ATA_HCYL
, WRITE_PATTERN4
);
797 if (((ATA_NSECTOR
& READ_PATTERN1_MASK
) == READ_PATTERN1
) &&
798 ((ATA_SECTOR
& READ_PATTERN2_MASK
) == READ_PATTERN2
) &&
799 ((ATA_LCYL
& READ_PATTERN3_MASK
) == READ_PATTERN3
) &&
800 ((ATA_HCYL
& READ_PATTERN4_MASK
) == READ_PATTERN4
))
806 static int freeze_lock(void)
808 /* does the disk support Security Mode feature set? */
809 if (identify_info
[82] & 2)
811 SET_REG(ATA_SELECT
, ata_device
);
816 SET_REG(ATA_COMMAND
, CMD_SECURITY_FREEZE_LOCK
);
825 void ata_spindown(int seconds
)
827 sleep_timeout
= seconds
* HZ
;
830 bool ata_disk_is_active(void)
835 static int ata_perform_sleep(void)
837 mutex_lock(&ata_mtx
);
839 SET_REG(ATA_SELECT
, ata_device
);
841 if(!wait_for_rdy()) {
842 DEBUGF("ata_perform_sleep() - not RDY\n");
843 mutex_unlock(&ata_mtx
);
847 SET_REG(ATA_COMMAND
, CMD_SLEEP
);
851 DEBUGF("ata_perform_sleep() - CMD failed\n");
852 mutex_unlock(&ata_mtx
);
857 mutex_unlock(&ata_mtx
);
863 queue_post(&ata_queue
, Q_SLEEP
, 0);
866 void ata_sleepnow(void)
868 if (!spinup
&& !sleeping
&& !ata_mtx
.locked
&& initialized
)
870 call_ata_idle_notifys(false);
877 last_user_activity
= current_tick
;
880 static void ata_thread(void)
882 static long last_sleep
= 0;
883 struct queue_event ev
;
884 static long last_seen_mtx_unlock
= 0;
887 queue_wait_w_tmo(&ata_queue
, &ev
, HZ
/2);
891 if (!spinup
&& !sleeping
)
895 if (!last_seen_mtx_unlock
)
896 last_seen_mtx_unlock
= current_tick
;
897 if (TIME_AFTER(current_tick
, last_seen_mtx_unlock
+(HZ
*2)))
899 call_ata_idle_notifys(false);
900 last_seen_mtx_unlock
= 0;
903 if ( sleep_timeout
&&
904 TIME_AFTER( current_tick
,
905 last_user_activity
+ sleep_timeout
) &&
906 TIME_AFTER( current_tick
,
907 last_disk_activity
+ sleep_timeout
) )
909 call_ata_idle_notifys(true);
911 last_sleep
= current_tick
;
915 #ifdef HAVE_ATA_POWER_OFF
916 if ( !spinup
&& sleeping
&& !poweroff
&&
917 TIME_AFTER( current_tick
, last_sleep
+ ATA_POWER_OFF_TIMEOUT
))
919 mutex_lock(&ata_mtx
);
920 ide_power_enable(false);
921 mutex_unlock(&ata_mtx
);
928 case SYS_USB_CONNECTED
:
930 mutex_lock(&ata_mtx
);
934 mutex_unlock(&ata_mtx
);
937 /* Tell the USB thread that we are safe */
938 DEBUGF("ata_thread got SYS_USB_CONNECTED\n");
939 usb_acknowledge(SYS_USB_CONNECTED_ACK
);
941 /* Wait until the USB cable is extracted again */
942 usb_wait_for_disconnect(&ata_queue
);
946 call_ata_idle_notifys(false);
947 last_disk_activity
= current_tick
- sleep_timeout
+ (HZ
/2);
950 #ifdef ATA_DRIVER_CLOSE
958 /* Hardware reset protocol as specified in chapter 9.1, ATA spec draft v5 */
959 int ata_hard_reset(void)
963 mutex_lock(&ata_mtx
);
968 SET_REG(ATA_SELECT
, ata_device
); /* select the right device */
969 ret
= wait_for_bsy();
971 /* Massage the return code so it is 0 on success and -1 on failure */
974 mutex_unlock(&ata_mtx
);
979 static int perform_soft_reset(void)
981 /* If this code is allowed to run on a Nano, the next reads from the flash will
982 * time out, so we disable it. It shouldn't be necessary anyway, since the
983 * ATA -> Flash interface automatically sleeps almost immediately after the
989 SET_REG(ATA_SELECT
, SELECT_LBA
| ata_device
);
990 SET_REG(ATA_CONTROL
, CONTROL_nIEN
|CONTROL_SRST
);
991 sleep(1); /* >= 5us */
993 SET_REG(ATA_CONTROL
, CONTROL_nIEN
);
996 /* This little sucker can take up to 30 seconds */
1000 ret
= wait_for_rdy();
1001 } while(!ret
&& retry_count
--);
1009 if (set_multiple_mode(multisectors
))
1018 int ata_soft_reset(void)
1022 mutex_lock(&ata_mtx
);
1024 ret
= perform_soft_reset();
1026 mutex_unlock(&ata_mtx
);
1030 static int ata_power_on(void)
1034 ide_power_enable(true);
1035 sleep(HZ
/4); /* allow voltage to build up */
1037 /* Accessing the PP IDE controller too early after powering up the disk
1038 * makes the core hang for a short time, causing an audio dropout. This
1039 * also depends on the disk; iPod Mini G2 needs at least HZ/5 to get rid
1040 * of the dropout. Since this time isn't additive (the wait_for_bsy() in
1041 * ata_hard_reset() will shortened by the same amount), it's a good idea
1042 * to do this on all HDD based targets. */
1044 if( ata_hard_reset() )
1047 rc
= set_features();
1051 if (set_multiple_mode(multisectors
))
1060 static int master_slave_detect(void)
1063 SET_REG(ATA_SELECT
, 0);
1064 if ( ATA_STATUS
& (STATUS_RDY
|STATUS_BSY
) ) {
1066 DEBUGF("Found master harddisk\n");
1070 SET_REG(ATA_SELECT
, SELECT_DEVICE1
);
1071 if ( ATA_STATUS
& (STATUS_RDY
|STATUS_BSY
) ) {
1072 ata_device
= SELECT_DEVICE1
;
1073 DEBUGF("Found slave harddisk\n");
1081 static int identify(void)
1085 SET_REG(ATA_SELECT
, ata_device
);
1087 if(!wait_for_rdy()) {
1088 DEBUGF("identify() - not RDY\n");
1091 SET_REG(ATA_COMMAND
, CMD_IDENTIFY
);
1093 if (!wait_for_start_of_transfer())
1095 DEBUGF("identify() - CMD failed\n");
1099 for (i
=0; i
<SECTOR_SIZE
/2; i
++) {
1100 /* the IDENTIFY words are already swapped, so we need to treat
1101 this info differently that normal sector data */
1102 #if defined(ROCKBOX_BIG_ENDIAN) && !defined(SWAP_WORDS)
1103 identify_info
[i
] = swap16(ATA_DATA
);
1105 identify_info
[i
] = ATA_DATA
;
1112 static int set_multiple_mode(int sectors
)
1114 SET_REG(ATA_SELECT
, ata_device
);
1116 if(!wait_for_rdy()) {
1117 DEBUGF("set_multiple_mode() - not RDY\n");
1121 SET_REG(ATA_NSECTOR
, sectors
);
1122 SET_REG(ATA_COMMAND
, CMD_SET_MULTIPLE_MODE
);
1124 if (!wait_for_rdy())
1126 DEBUGF("set_multiple_mode() - CMD failed\n");
1133 static int set_features(void)
1136 unsigned char id_word
;
1137 unsigned char id_bit
;
1138 unsigned char subcommand
;
1139 unsigned char parameter
;
1141 { 83, 14, 0x03, 0 }, /* force PIO mode */
1142 { 83, 3, 0x05, 0x80 }, /* adv. power management: lowest w/o standby */
1143 { 83, 9, 0x42, 0x80 }, /* acoustic management: lowest noise */
1144 { 82, 6, 0xaa, 0 }, /* enable read look-ahead */
1149 /* Find out the highest supported PIO mode */
1150 if(identify_info
[64] & 2)
1153 if(identify_info
[64] & 1)
1156 /* Update the table: set highest supported pio mode that we also support */
1157 features
[0].parameter
= 8 + pio_mode
;
1159 SET_REG(ATA_SELECT
, ata_device
);
1161 if (!wait_for_rdy()) {
1162 DEBUGF("set_features() - not RDY\n");
1166 for (i
=0; i
< (int)(sizeof(features
)/sizeof(features
[0])); i
++) {
1167 if (identify_info
[features
[i
].id_word
] & (1 << features
[i
].id_bit
)) {
1168 SET_REG(ATA_FEATURE
, features
[i
].subcommand
);
1169 SET_REG(ATA_NSECTOR
, features
[i
].parameter
);
1170 SET_REG(ATA_COMMAND
, CMD_SET_FEATURES
);
1172 if (!wait_for_rdy()) {
1173 DEBUGF("set_features() - CMD failed\n");
1177 if((ATA_ALT_STATUS
& STATUS_ERR
) && (i
!= 1)) {
1178 /* some CF cards don't like advanced powermanagement
1179 even if they mark it as supported - go figure... */
1180 if(ATA_ERROR
& ERROR_ABRT
) {
1187 #ifdef ATA_SET_DEVICE_FEATURES
1188 ata_set_pio_timings(pio_mode
);
1194 unsigned short* ata_get_identify(void)
1196 return identify_info
;
1199 static int init_and_check(bool hard_reset
)
1205 /* This should reset both master and slave, we don't yet know what's in */
1207 if (ata_hard_reset())
1211 rc
= master_slave_detect();
1215 /* symptom fix: else check_registers() below may fail */
1216 if (hard_reset
&& !wait_for_bsy())
1219 rc
= check_registers();
1231 if ( !initialized
) {
1232 mutex_init(&ata_mtx
);
1233 queue_init(&ata_queue
, true);
1236 mutex_lock(&ata_mtx
);
1238 /* must be called before ata_device_init() */
1239 coldstart
= ata_is_coldstart();
1244 #ifdef MAX_PHYS_SECTOR_SIZE
1245 memset(§or_cache
, 0, sizeof(sector_cache
));
1248 if ( !initialized
) {
1249 /* First call won't have multiple thread contention - this
1250 * may return at any point without having to unlock */
1251 mutex_unlock(&ata_mtx
);
1253 if (!ide_powered()) /* somebody has switched it off */
1255 ide_power_enable(true);
1256 sleep(HZ
/4); /* allow voltage to build up */
1259 /* first try, hard reset at cold start only */
1260 rc
= init_and_check(coldstart
);
1263 { /* failed? -> second try, always with hard reset */
1264 DEBUGF("ata: init failed, retrying...\n");
1265 rc
= init_and_check(true);
1275 multisectors
= identify_info
[47] & 0xff;
1276 if (multisectors
== 0) /* Invalid multisector info, try with 16 */
1279 DEBUGF("ata: %d sectors per ata request\n",multisectors
);
1281 #ifdef MAX_PHYS_SECTOR_SIZE
1282 /* Find out the physical sector size */
1283 if((identify_info
[106] & 0xe000) == 0x6000)
1284 phys_sector_mult
= 1 << (identify_info
[106] & 0x000f);
1286 phys_sector_mult
= 1;
1288 DEBUGF("ata: %d logical sectors per phys sector", phys_sector_mult
);
1290 if (phys_sector_mult
> (MAX_PHYS_SECTOR_SIZE
/SECTOR_SIZE
))
1291 panicf("Unsupported physical sector size: %d",
1292 phys_sector_mult
* SECTOR_SIZE
);
1295 total_sectors
= identify_info
[60] | (identify_info
[61] << 16);
1298 if (identify_info
[83] & 0x0400 /* 48 bit address support */
1299 && total_sectors
== 0x0FFFFFFF) /* and disk size >= 128 GiB */
1300 { /* (needs BigLBA addressing) */
1301 if (identify_info
[102] || identify_info
[103])
1302 panicf("Unsupported disk size: >= 2^32 sectors");
1304 total_sectors
= identify_info
[100] | (identify_info
[101] << 16);
1305 lba48
= true; /* use BigLBA */
1313 rc
= set_features();
1317 mutex_lock(&ata_mtx
); /* Balance unlock below */
1319 last_disk_activity
= current_tick
;
1320 #ifdef ATA_DRIVER_CLOSE
1323 create_thread(ata_thread
, ata_stack
,
1324 sizeof(ata_stack
), 0, ata_thread_name
1325 IF_PRIO(, PRIORITY_USER_INTERFACE
)
1330 rc
= set_multiple_mode(multisectors
);
1334 mutex_unlock(&ata_mtx
);
1338 #ifdef ATA_DRIVER_CLOSE
1339 void ata_close(void)
1341 struct thread_entry
*thread
= ata_thread_p
;
1346 ata_thread_p
= NULL
;
1348 queue_post(&ata_queue
, Q_CLOSE
, 0);
1349 thread_wait(thread
);
1351 #endif /* ATA_DRIVER_CLOSE */
1353 #if (CONFIG_LED == LED_REAL)
1354 void ata_set_led_enabled(bool enabled
)
1356 ata_led_enabled
= enabled
;
1357 if (ata_led_enabled
)