/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/blkdev.h>
25 #include <linux/wait.h>
26 #include <linux/mutex.h>
27 #include <linux/kthread.h>
28 #include <linux/log2.h>
29 #include <linux/init.h>
30 #include <linux/slab.h>
32 /**** Helper functions used for Div, Remainder operation on u64 ****/
34 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
35 * Function: GLOB_Calc_Used_Bits
36 * Inputs: Power of 2 number
37 * Outputs: Number of Used Bits
38 * 0, if the argument is 0
39 * Description: Calculate the number of bits used by a given power of 2 number
40 * Number can be upto 32 bit
41 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
42 int GLOB_Calc_Used_Bits(u32 n
)
69 return ((n
== 0) ? (0) : tot_bits
);
72 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
73 * Function: GLOB_u64_Div
74 * Inputs: Number of u64
75 * A power of 2 number as Division
76 * Outputs: Quotient of the Divisor operation
77 * Description: It divides the address by divisor by using bit shift operation
78 * (essentially without explicitely using "/").
79 * Divisor is a power of 2 number and Divided is of u64
80 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
81 u64
GLOB_u64_Div(u64 addr
, u32 divisor
)
83 return (u64
)(addr
>> GLOB_Calc_Used_Bits(divisor
));
86 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
87 * Function: GLOB_u64_Remainder
88 * Inputs: Number of u64
89 * Divisor Type (1 -PageAddress, 2- BlockAddress)
90 * Outputs: Remainder of the Division operation
91 * Description: It calculates the remainder of a number (of u64) by
92 * divisor(power of 2 number ) by using bit shifting and multiply
93 * operation(essentially without explicitely using "/").
94 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
95 u64
GLOB_u64_Remainder(u64 addr
, u32 divisor_type
)
99 if (divisor_type
== 1) { /* Remainder -- Page */
100 result
= (addr
>> DeviceInfo
.nBitsInPageDataSize
);
101 result
= result
* DeviceInfo
.wPageDataSize
;
102 } else if (divisor_type
== 2) { /* Remainder -- Block */
103 result
= (addr
>> DeviceInfo
.nBitsInBlockDataSize
);
104 result
= result
* DeviceInfo
.wBlockDataSize
;
107 result
= addr
- result
;
112 #define NUM_DEVICES 1
115 #define GLOB_SBD_NAME "nd"
116 #define GLOB_SBD_IRQ_NUM (29)
118 #define GLOB_SBD_IOCTL_GC (0x7701)
119 #define GLOB_SBD_IOCTL_WL (0x7702)
120 #define GLOB_SBD_IOCTL_FORMAT (0x7703)
121 #define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704)
122 #define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)
123 #define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706)
124 #define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707)
125 #define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708)
126 #define GLOB_SBD_IOCTL_WRITE_DATA (0x7709)
127 #define GLOB_SBD_IOCTL_READ_DATA (0x770A)
129 static int reserved_mb
= 0;
130 module_param(reserved_mb
, int, 0);
131 MODULE_PARM_DESC(reserved_mb
, "Reserved space for OS image, in MiB (default 25 MiB)");
133 int nand_debug_level
;
134 module_param(nand_debug_level
, int, 0644);
135 MODULE_PARM_DESC(nand_debug_level
, "debug level value: 1-3");
137 MODULE_LICENSE("GPL");
139 struct spectra_nand_dev
{
144 void __iomem
*ioaddr
; /* Mapped address */
145 struct request_queue
*queue
;
146 struct task_struct
*thread
;
152 static int GLOB_SBD_majornum
;
154 static char *GLOB_version
= GLOB_VERSION
;
156 static struct spectra_nand_dev nand_device
[NUM_DEVICES
];
158 static struct mutex spectra_lock
;
160 static int res_blks_os
= 1;
162 struct spectra_indentfy_dev_tag IdentifyDeviceData
;
164 static int force_flush_cache(void)
166 nand_dbg_print(NAND_DBG_DEBUG
, "%s, Line %d, Function: %s\n",
167 __FILE__
, __LINE__
, __func__
);
169 if (ERR
== GLOB_FTL_Flush_Cache()) {
170 printk(KERN_ERR
"Fail to Flush FTL Cache!\n");
174 if (glob_ftl_execute_cmds())
182 struct ioctl_rw_page_info
{
187 static int ioctl_read_page_data(unsigned long arg
)
190 struct ioctl_rw_page_info info
;
193 if (copy_from_user(&info
, (void __user
*)arg
, sizeof(info
)))
196 buf
= kmalloc(IdentifyDeviceData
.PageDataSize
, GFP_ATOMIC
);
198 printk(KERN_ERR
"ioctl_read_page_data: "
199 "failed to allocate memory\n");
203 mutex_lock(&spectra_lock
);
204 result
= GLOB_FTL_Page_Read(buf
,
205 (u64
)info
.page
* IdentifyDeviceData
.PageDataSize
);
206 mutex_unlock(&spectra_lock
);
208 if (copy_to_user((void __user
*)info
.data
, buf
,
209 IdentifyDeviceData
.PageDataSize
)) {
210 printk(KERN_ERR
"ioctl_read_page_data: "
211 "failed to copy user data\n");
220 static int ioctl_write_page_data(unsigned long arg
)
223 struct ioctl_rw_page_info info
;
226 if (copy_from_user(&info
, (void __user
*)arg
, sizeof(info
)))
229 buf
= kmalloc(IdentifyDeviceData
.PageDataSize
, GFP_ATOMIC
);
231 printk(KERN_ERR
"ioctl_write_page_data: "
232 "failed to allocate memory\n");
236 if (copy_from_user(buf
, (void __user
*)info
.data
,
237 IdentifyDeviceData
.PageDataSize
)) {
238 printk(KERN_ERR
"ioctl_write_page_data: "
239 "failed to copy user data\n");
244 mutex_lock(&spectra_lock
);
245 result
= GLOB_FTL_Page_Write(buf
,
246 (u64
)info
.page
* IdentifyDeviceData
.PageDataSize
);
247 mutex_unlock(&spectra_lock
);
253 /* Return how many blocks should be reserved for bad block replacement */
254 static int get_res_blk_num_bad_blk(void)
256 return IdentifyDeviceData
.wDataBlockNum
/ 10;
259 /* Return how many blocks should be reserved for OS image */
260 static int get_res_blk_num_os(void)
262 u32 res_blks
, blk_size
;
264 blk_size
= IdentifyDeviceData
.PageDataSize
*
265 IdentifyDeviceData
.PagesPerBlock
;
267 res_blks
= (reserved_mb
* 1024 * 1024) / blk_size
;
269 if ((res_blks
< 1) || (res_blks
>= IdentifyDeviceData
.wDataBlockNum
))
270 res_blks
= 1; /* Reserved 1 block for block table */
275 /* Transfer a full request. */
276 static int do_transfer(struct spectra_nand_dev
*tr
, struct request
*req
)
278 u64 start_addr
, addr
;
279 u32 logical_start_sect
, hd_start_sect
;
281 u32 rsect
, tsect
= 0;
283 u32 ratio
= IdentifyDeviceData
.PageDataSize
>> 9;
285 start_addr
= (u64
)(blk_rq_pos(req
)) << 9;
286 /* Add a big enough offset to prevent the OS Image from
287 * being accessed or damaged by file system */
288 start_addr
+= IdentifyDeviceData
.PageDataSize
*
289 IdentifyDeviceData
.PagesPerBlock
*
292 if (req
->cmd_type
& REQ_FLUSH
) {
293 if (force_flush_cache()) /* Fail to flush cache */
299 if (req
->cmd_type
!= REQ_TYPE_FS
)
302 if (blk_rq_pos(req
) + blk_rq_cur_sectors(req
) > get_capacity(tr
->gd
)) {
303 printk(KERN_ERR
"Spectra error: request over the NAND "
304 "capacity!sector %d, current_nr_sectors %d, "
305 "while capacity is %d\n",
306 (int)blk_rq_pos(req
),
307 blk_rq_cur_sectors(req
),
308 (int)get_capacity(tr
->gd
));
312 logical_start_sect
= start_addr
>> 9;
313 hd_start_sect
= logical_start_sect
/ ratio
;
314 rsect
= logical_start_sect
- hd_start_sect
* ratio
;
316 addr
= (u64
)hd_start_sect
* ratio
* 512;
318 nsect
= blk_rq_cur_sectors(req
);
321 tsect
= (ratio
- rsect
) < nsect
? (ratio
- rsect
) : nsect
;
323 switch (rq_data_dir(req
)) {
325 /* Read the first NAND page */
327 if (GLOB_FTL_Page_Read(tr
->tmp_buf
, addr
)) {
328 printk(KERN_ERR
"Error in %s, Line %d\n",
332 memcpy(buf
, tr
->tmp_buf
+ (rsect
<< 9), tsect
<< 9);
333 addr
+= IdentifyDeviceData
.PageDataSize
;
338 /* Read the other NAND pages */
339 for (hd_sects
= nsect
/ ratio
; hd_sects
> 0; hd_sects
--) {
340 if (GLOB_FTL_Page_Read(buf
, addr
)) {
341 printk(KERN_ERR
"Error in %s, Line %d\n",
345 addr
+= IdentifyDeviceData
.PageDataSize
;
346 buf
+= IdentifyDeviceData
.PageDataSize
;
349 /* Read the last NAND pages */
351 if (GLOB_FTL_Page_Read(tr
->tmp_buf
, addr
)) {
352 printk(KERN_ERR
"Error in %s, Line %d\n",
356 memcpy(buf
, tr
->tmp_buf
, (nsect
% ratio
) << 9);
359 if (glob_ftl_execute_cmds())
367 /* Write the first NAND page */
369 if (GLOB_FTL_Page_Read(tr
->tmp_buf
, addr
)) {
370 printk(KERN_ERR
"Error in %s, Line %d\n",
374 memcpy(tr
->tmp_buf
+ (rsect
<< 9), buf
, tsect
<< 9);
375 if (GLOB_FTL_Page_Write(tr
->tmp_buf
, addr
)) {
376 printk(KERN_ERR
"Error in %s, Line %d\n",
380 addr
+= IdentifyDeviceData
.PageDataSize
;
385 /* Write the other NAND pages */
386 for (hd_sects
= nsect
/ ratio
; hd_sects
> 0; hd_sects
--) {
387 if (GLOB_FTL_Page_Write(buf
, addr
)) {
388 printk(KERN_ERR
"Error in %s, Line %d\n",
392 addr
+= IdentifyDeviceData
.PageDataSize
;
393 buf
+= IdentifyDeviceData
.PageDataSize
;
396 /* Write the last NAND pages */
398 if (GLOB_FTL_Page_Read(tr
->tmp_buf
, addr
)) {
399 printk(KERN_ERR
"Error in %s, Line %d\n",
403 memcpy(tr
->tmp_buf
, buf
, (nsect
% ratio
) << 9);
404 if (GLOB_FTL_Page_Write(tr
->tmp_buf
, addr
)) {
405 printk(KERN_ERR
"Error in %s, Line %d\n",
411 if (glob_ftl_execute_cmds())
419 printk(KERN_NOTICE
"Unknown request %u\n", rq_data_dir(req
));
424 /* This function is copied from drivers/mtd/mtd_blkdevs.c */
425 static int spectra_trans_thread(void *arg
)
427 struct spectra_nand_dev
*tr
= arg
;
428 struct request_queue
*rq
= tr
->queue
;
429 struct request
*req
= NULL
;
431 /* we might get involved when memory gets low, so use PF_MEMALLOC */
432 current
->flags
|= PF_MEMALLOC
;
434 spin_lock_irq(rq
->queue_lock
);
435 while (!kthread_should_stop()) {
439 req
= blk_fetch_request(rq
);
441 set_current_state(TASK_INTERRUPTIBLE
);
442 spin_unlock_irq(rq
->queue_lock
);
444 spin_lock_irq(rq
->queue_lock
);
449 spin_unlock_irq(rq
->queue_lock
);
451 mutex_lock(&spectra_lock
);
452 res
= do_transfer(tr
, req
);
453 mutex_unlock(&spectra_lock
);
455 spin_lock_irq(rq
->queue_lock
);
457 if (!__blk_end_request_cur(req
, res
))
462 __blk_end_request_all(req
, -EIO
);
464 spin_unlock_irq(rq
->queue_lock
);
470 /* Request function that "handles clustering". */
471 static void GLOB_SBD_request(struct request_queue
*rq
)
473 struct spectra_nand_dev
*pdev
= rq
->queuedata
;
474 wake_up_process(pdev
->thread
);
477 static int GLOB_SBD_open(struct block_device
*bdev
, fmode_t mode
)
480 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
481 __FILE__
, __LINE__
, __func__
);
485 static int GLOB_SBD_release(struct gendisk
*disk
, fmode_t mode
)
489 nand_dbg_print(NAND_DBG_WARN
, "%s, Line %d, Function: %s\n",
490 __FILE__
, __LINE__
, __func__
);
492 mutex_lock(&spectra_lock
);
493 ret
= force_flush_cache();
494 mutex_unlock(&spectra_lock
);
499 static int GLOB_SBD_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
503 geo
->cylinders
= get_capacity(bdev
->bd_disk
) / (4 * 16);
505 nand_dbg_print(NAND_DBG_DEBUG
,
506 "heads: %d, sectors: %d, cylinders: %d\n",
507 geo
->heads
, geo
->sectors
, geo
->cylinders
);
512 int GLOB_SBD_ioctl(struct block_device
*bdev
, fmode_t mode
,
513 unsigned int cmd
, unsigned long arg
)
517 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
518 __FILE__
, __LINE__
, __func__
);
521 case GLOB_SBD_IOCTL_GC
:
522 nand_dbg_print(NAND_DBG_DEBUG
,
523 "Spectra IOCTL: Garbage Collection "
524 "being performed\n");
525 if (PASS
!= GLOB_FTL_Garbage_Collection())
529 case GLOB_SBD_IOCTL_WL
:
530 nand_dbg_print(NAND_DBG_DEBUG
,
531 "Spectra IOCTL: Static Wear Leveling "
532 "being performed\n");
533 if (PASS
!= GLOB_FTL_Wear_Leveling())
537 case GLOB_SBD_IOCTL_FORMAT
:
538 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra IOCTL: Flash format "
539 "being performed\n");
540 if (PASS
!= GLOB_FTL_Flash_Format())
544 case GLOB_SBD_IOCTL_FLUSH_CACHE
:
545 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra IOCTL: Cache flush "
546 "being performed\n");
547 mutex_lock(&spectra_lock
);
548 ret
= force_flush_cache();
549 mutex_unlock(&spectra_lock
);
552 case GLOB_SBD_IOCTL_COPY_BLK_TABLE
:
553 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra IOCTL: "
554 "Copy block table\n");
555 if (copy_to_user((void __user
*)arg
,
556 get_blk_table_start_addr(),
557 get_blk_table_len()))
561 case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE
:
562 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra IOCTL: "
563 "Copy wear leveling table\n");
564 if (copy_to_user((void __user
*)arg
,
565 get_wear_leveling_table_start_addr(),
566 get_wear_leveling_table_len()))
570 case GLOB_SBD_IOCTL_GET_NAND_INFO
:
571 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra IOCTL: "
573 if (copy_to_user((void __user
*)arg
, &IdentifyDeviceData
,
574 sizeof(IdentifyDeviceData
)))
578 case GLOB_SBD_IOCTL_WRITE_DATA
:
579 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra IOCTL: "
580 "Write one page data\n");
581 return ioctl_write_page_data(arg
);
583 case GLOB_SBD_IOCTL_READ_DATA
:
584 nand_dbg_print(NAND_DBG_DEBUG
, "Spectra IOCTL: "
585 "Read one page data\n");
586 return ioctl_read_page_data(arg
);
592 static DEFINE_MUTEX(ffsport_mutex
);
594 int GLOB_SBD_unlocked_ioctl(struct block_device
*bdev
, fmode_t mode
,
595 unsigned int cmd
, unsigned long arg
)
599 mutex_lock(&ffsport_mutex
);
600 ret
= GLOB_SBD_ioctl(bdev
, mode
, cmd
, arg
);
601 mutex_unlock(&ffsport_mutex
);
606 static struct block_device_operations GLOB_SBD_ops
= {
607 .owner
= THIS_MODULE
,
608 .open
= GLOB_SBD_open
,
609 .release
= GLOB_SBD_release
,
610 .ioctl
= GLOB_SBD_unlocked_ioctl
,
611 .getgeo
= GLOB_SBD_getgeo
,
614 static int SBD_setup_device(struct spectra_nand_dev
*dev
, int which
)
619 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
620 __FILE__
, __LINE__
, __func__
);
622 memset(dev
, 0, sizeof(struct spectra_nand_dev
));
624 nand_dbg_print(NAND_DBG_WARN
, "Reserved %d blocks "
625 "for OS image, %d blocks for bad block replacement.\n",
626 get_res_blk_num_os(),
627 get_res_blk_num_bad_blk());
629 res_blks
= get_res_blk_num_bad_blk() + get_res_blk_num_os();
631 dev
->size
= (u64
)IdentifyDeviceData
.PageDataSize
*
632 IdentifyDeviceData
.PagesPerBlock
*
633 (IdentifyDeviceData
.wDataBlockNum
- res_blks
);
635 res_blks_os
= get_res_blk_num_os();
637 spin_lock_init(&dev
->qlock
);
639 dev
->tmp_buf
= kmalloc(IdentifyDeviceData
.PageDataSize
, GFP_ATOMIC
);
641 printk(KERN_ERR
"Failed to kmalloc memory in %s Line %d, exit.\n",
646 dev
->queue
= blk_init_queue(GLOB_SBD_request
, &dev
->qlock
);
647 if (dev
->queue
== NULL
) {
649 "Spectra: Request queue could not be initialized."
653 dev
->queue
->queuedata
= dev
;
655 /* As Linux block layer doens't support >4KB hardware sector, */
656 /* Here we force report 512 byte hardware sector size to Kernel */
657 blk_queue_logical_block_size(dev
->queue
, 512);
659 blk_queue_ordered(dev
->queue
, QUEUE_ORDERED_DRAIN_FLUSH
);
661 dev
->thread
= kthread_run(spectra_trans_thread
, dev
, "nand_thd");
662 if (IS_ERR(dev
->thread
)) {
663 blk_cleanup_queue(dev
->queue
);
664 unregister_blkdev(GLOB_SBD_majornum
, GLOB_SBD_NAME
);
665 return PTR_ERR(dev
->thread
);
668 dev
->gd
= alloc_disk(PARTITIONS
);
671 "Spectra: Could not allocate disk. Aborting \n ");
674 dev
->gd
->major
= GLOB_SBD_majornum
;
675 dev
->gd
->first_minor
= which
* PARTITIONS
;
676 dev
->gd
->fops
= &GLOB_SBD_ops
;
677 dev
->gd
->queue
= dev
->queue
;
678 dev
->gd
->private_data
= dev
;
679 snprintf(dev
->gd
->disk_name
, 32, "%s%c", GLOB_SBD_NAME
, which
+ 'a');
681 sects
= dev
->size
>> 9;
682 nand_dbg_print(NAND_DBG_WARN
, "Capacity sects: %d\n", sects
);
683 set_capacity(dev
->gd
, sects
);
693 static ssize_t show_nand_block_num(struct device *dev,
694 struct device_attribute *attr, char *buf)
696 return snprintf(buf, PAGE_SIZE, "%d\n",
697 (int)IdentifyDeviceData.wDataBlockNum);
700 static ssize_t show_nand_pages_per_block(struct device *dev,
701 struct device_attribute *attr, char *buf)
703 return snprintf(buf, PAGE_SIZE, "%d\n",
704 (int)IdentifyDeviceData.PagesPerBlock);
707 static ssize_t show_nand_page_size(struct device *dev,
708 struct device_attribute *attr, char *buf)
710 return snprintf(buf, PAGE_SIZE, "%d\n",
711 (int)IdentifyDeviceData.PageDataSize);
714 static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
715 static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
716 static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
718 static void create_sysfs_entry(struct device *dev)
720 if (device_create_file(dev, &dev_attr_nand_block_num))
721 printk(KERN_ERR "Spectra: "
722 "failed to create sysfs entry nand_block_num.\n");
723 if (device_create_file(dev, &dev_attr_nand_pages_per_block))
724 printk(KERN_ERR "Spectra: "
725 "failed to create sysfs entry nand_pages_per_block.\n");
726 if (device_create_file(dev, &dev_attr_nand_page_size))
727 printk(KERN_ERR "Spectra: "
728 "failed to create sysfs entry nand_page_size.\n");
732 static int GLOB_SBD_init(void)
736 /* Set debug output level (0~3) here. 3 is most verbose */
737 printk(KERN_ALERT
"Spectra: %s\n", GLOB_version
);
739 mutex_init(&spectra_lock
);
741 GLOB_SBD_majornum
= register_blkdev(0, GLOB_SBD_NAME
);
742 if (GLOB_SBD_majornum
<= 0) {
743 printk(KERN_ERR
"Unable to get the major %d for Spectra",
748 if (PASS
!= GLOB_FTL_Flash_Init()) {
749 printk(KERN_ERR
"Spectra: Unable to Initialize Flash Device. "
751 goto out_flash_register
;
754 /* create_sysfs_entry(&dev->dev); */
756 if (PASS
!= GLOB_FTL_IdentifyDevice(&IdentifyDeviceData
)) {
757 printk(KERN_ERR
"Spectra: Unable to Read Flash Device. "
759 goto out_flash_register
;
761 nand_dbg_print(NAND_DBG_WARN
, "In GLOB_SBD_init: "
762 "Num blocks=%d, pagesperblock=%d, "
763 "pagedatasize=%d, ECCBytesPerSector=%d\n",
764 (int)IdentifyDeviceData
.NumBlocks
,
765 (int)IdentifyDeviceData
.PagesPerBlock
,
766 (int)IdentifyDeviceData
.PageDataSize
,
767 (int)IdentifyDeviceData
.wECCBytesPerSector
);
770 printk(KERN_ALERT
"Spectra: searching block table, please wait ...\n");
771 if (GLOB_FTL_Init() != PASS
) {
772 printk(KERN_ERR
"Spectra: Unable to Initialize FTL Layer. "
774 goto out_ftl_flash_register
;
776 printk(KERN_ALERT
"Spectra: block table has been found.\n");
778 for (i
= 0; i
< NUM_DEVICES
; i
++)
779 if (SBD_setup_device(&nand_device
[i
], i
) == -ENOMEM
)
780 goto out_ftl_flash_register
;
782 nand_dbg_print(NAND_DBG_DEBUG
,
783 "Spectra: module loaded with major number %d\n",
788 out_ftl_flash_register
:
789 GLOB_FTL_Cache_Release();
791 GLOB_FTL_Flash_Release();
792 unregister_blkdev(GLOB_SBD_majornum
, GLOB_SBD_NAME
);
793 printk(KERN_ERR
"Spectra: Module load failed.\n");
798 static void __exit
GLOB_SBD_exit(void)
802 nand_dbg_print(NAND_DBG_TRACE
, "%s, Line %d, Function: %s\n",
803 __FILE__
, __LINE__
, __func__
);
805 for (i
= 0; i
< NUM_DEVICES
; i
++) {
806 struct spectra_nand_dev
*dev
= &nand_device
[i
];
808 del_gendisk(dev
->gd
);
812 blk_cleanup_queue(dev
->queue
);
816 unregister_blkdev(GLOB_SBD_majornum
, GLOB_SBD_NAME
);
818 mutex_lock(&spectra_lock
);
820 mutex_unlock(&spectra_lock
);
822 GLOB_FTL_Cache_Release();
824 GLOB_FTL_Flash_Release();
826 nand_dbg_print(NAND_DBG_DEBUG
,
827 "Spectra FTL module (major number %d) unloaded.\n",
831 module_init(GLOB_SBD_init
);
832 module_exit(GLOB_SBD_exit
);