/*
 *  drivers/block/mg_disk.c
 *
 *  Support for the mGine m[g]flash IO mode.
 *  Based on legacy hd.c
 *
 * (c) 2008  mGine Co.,LTD
 * (c) 2008  unsik Kim <donari75@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mg_disk.h>

#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)

/* name for block device */
#define MG_DISK_NAME "mgd"

#define MG_DISK_MAJ 0
#define MG_DISK_MAX_PART 16
#define MG_SECTOR_SIZE 512
#define MG_MAX_SECTS 256

/* Register offsets */
#define MG_BUFF_OFFSET 0x8000
#define MG_STORAGE_BUFFER_SIZE 0x200
#define MG_REG_OFFSET 0xC000
#define MG_REG_FEATURE (MG_REG_OFFSET + 2)	/* write case */
#define MG_REG_ERROR (MG_REG_OFFSET + 2)	/* read case */
#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE)	/* write case */
#define MG_REG_STATUS (MG_REG_OFFSET + 0xE)	/* read case */
#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)

/* handy status */
#define MG_STAT_READY	(ATA_DRDY | ATA_DSC)
#define MG_READY_OK(s)	(((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
				 ATA_ERR))) == MG_STAT_READY)

/* error code for others */
#define MG_ERR_NONE		0
#define MG_ERR_TIMEOUT		0x100
#define MG_ERR_INIT_STAT	0x101
#define MG_ERR_TRANSLATION	0x102
#define MG_ERR_CTRL_RST		0x103
#define MG_ERR_INV_STAT		0x104
#define MG_ERR_RSTOUT		0x105

#define MG_MAX_ERRORS	6	/* Max read/write errors */

/* command */
#define MG_CMD_RD 0x20
#define MG_CMD_WR 0x30
#define MG_CMD_SLEEP 0x99
#define MG_CMD_WAKEUP 0xC3
#define MG_CMD_ID 0xEC
#define MG_CMD_WR_CONF 0x3C
#define MG_CMD_RD_CONF 0x40

/* operation mode */
#define MG_OP_CASCADE (1 << 0)
#define MG_OP_CASCADE_SYNC_RD (1 << 1)
#define MG_OP_CASCADE_SYNC_WR (1 << 2)
#define MG_OP_INTERLEAVE (1 << 3)

/* synchronous */
#define MG_BURST_LAT_4 (3 << 4)
#define MG_BURST_LAT_5 (4 << 4)
#define MG_BURST_LAT_6 (5 << 4)
#define MG_BURST_LAT_7 (6 << 4)
#define MG_BURST_LAT_8 (7 << 4)
#define MG_BURST_LEN_4 (1 << 1)
#define MG_BURST_LEN_8 (2 << 1)
#define MG_BURST_LEN_16 (3 << 1)
#define MG_BURST_LEN_32 (4 << 1)
#define MG_BURST_LEN_CONT (0 << 1)

/* timeout value (unit: ms) */
#define MG_TMAX_CONF_TO_CMD	1
#define MG_TMAX_WAIT_RD_DRQ	10
#define MG_TMAX_WAIT_WR_DRQ	500
#define MG_TMAX_RST_TO_BUSY	10
#define MG_TMAX_HDRST_TO_RDY	500
#define MG_TMAX_SWRST_TO_RDY	500
#define MG_TMAX_RSTOUT		3000

#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)

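/*
 * Layout note (derived from the offsets above): the mflash exposes a
 * 0x200-byte sector buffer window at MG_BUFF_OFFSET and an ATA-like
 * task-file register block at MG_REG_OFFSET, both inside the memory
 * region supplied by the platform device.
 */
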
/* main structure for mflash driver */
struct mg_host {
	struct device *dev;

	struct request_queue *breq;
	struct request *req;
	spinlock_t lock;
	struct gendisk *gd;

	struct timer_list timer;
	void (*mg_do_intr) (struct mg_host *);

	u16 id[ATA_ID_WORDS];

	u16 cyls;
	u16 heads;
	u16 sectors;
	u32 n_sectors;
	u32 nres_sectors;

	void __iomem *dev_base;
	unsigned int irq;
	unsigned int rst;
	unsigned int rstout;

	u32 major;
	u32 error;
};

/*
 * Debugging macro and defines
 */
#undef DO_MG_DEBUG
#ifdef DO_MG_DEBUG
# define MG_DBG(fmt, args...) \
	printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
#else /* CONFIG_MG_DEBUG */
# define MG_DBG(fmt, args...) do { } while (0)
#endif /* CONFIG_MG_DEBUG */

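/*
 * mg_request() is forward declared because the completion helpers and
 * interrupt handlers below re-kick the queue.  mg_end_request() completes
 * nr_bytes of host->req and returns true while sectors are still
 * outstanding, clearing host->req once the request is fully done;
 * mg_end_request_cur() completes just the current chunk, typically on
 * error.
 */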
static void mg_request(struct request_queue *);

static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
{
	if (__blk_end_request(host->req, err, nr_bytes))
		return true;

	host->req = NULL;
	return false;
}

static bool mg_end_request_cur(struct mg_host *host, int err)
{
	return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
}

static void mg_dump_status(const char *msg, unsigned int stat,
		struct mg_host *host)
{
	char *name = MG_DISK_NAME;

	if (host->req)
		name = host->req->rq_disk->disk_name;

	printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
	if (stat & ATA_BUSY)
		printk("Busy ");
	if (stat & ATA_DRDY)
		printk("DriveReady ");
	if (stat & ATA_DF)
		printk("WriteFault ");
	if (stat & ATA_DSC)
		printk("SeekComplete ");
	if (stat & ATA_DRQ)
		printk("DataRequest ");
	if (stat & ATA_CORR)
		printk("CorrectedError ");
	if (stat & ATA_ERR)
		printk("Error ");
	printk("}\n");
	if ((stat & ATA_ERR) == 0) {
		host->error = 0;
	} else {
		host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
		printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
				host->error & 0xff);
		if (host->error & ATA_BBK)
			printk("BadSector ");
		if (host->error & ATA_UNC)
			printk("UncorrectableError ");
		if (host->error & ATA_IDNF)
			printk("SectorIdNotFound ");
		if (host->error & ATA_ABORTED)
			printk("DriveStatusError ");
		if (host->error & ATA_AMNF)
			printk("AddrMarkNotFound ");
		printk("}");
		if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
			if (host->req)
				printk(", sector=%u",
				       (unsigned int)blk_rq_pos(host->req));
		}
		printk("\n");
	}
}

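/*
 * mg_wait(): poll the status register until the 'expect' condition
 * (ATA_BUSY, MG_STAT_READY or ATA_DRQ) is met, an error bit is raised,
 * or 'msec' milliseconds elapse.  Returns MG_ERR_NONE on success,
 * MG_ERR_TIMEOUT on expiry, or MG_ERR_INV_STAT when called with a zero
 * timeout and the device is not ready.
 */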
static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
{
	u8 status;
	unsigned long expire, cur_jiffies;
	struct mg_drv_data *prv_data = host->dev->platform_data;

	host->error = MG_ERR_NONE;
	expire = jiffies + msecs_to_jiffies(msec);

	status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	do {
		cur_jiffies = jiffies;
		if (status & ATA_BUSY) {
			if (expect == ATA_BUSY)
				break;
		} else {
			/* Check the error condition! */
			if (status & ATA_ERR) {
				mg_dump_status("mg_wait", status, host);
				break;
			}

			if (expect == MG_STAT_READY)
				if (MG_READY_OK(status))
					break;

			if (expect == ATA_DRQ)
				if (status & ATA_DRQ)
					break;
		}
		if (!msec) {
			mg_dump_status("not ready", status, host);
			return MG_ERR_INV_STAT;
		}
		if (prv_data->use_polling)
			msleep(1);

		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
	} while (time_before(cur_jiffies, expire));

	if (time_after_eq(cur_jiffies, expire) && msec)
		host->error = MG_ERR_TIMEOUT;

	return host->error;
}

static unsigned int mg_wait_rstout(u32 rstout, u32 msec)
{
	unsigned long expire;

	expire = jiffies + msecs_to_jiffies(msec);
	while (time_before(jiffies, expire)) {
		if (gpio_get_value(rstout) == 1)
			return MG_ERR_NONE;
		msleep(10);
	}

	return MG_ERR_RSTOUT;
}

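/*
 * Interrupt path: mg_out() records the per-command completion handler in
 * host->mg_do_intr; mg_irq() runs it under host->lock and falls back to
 * mg_unexpected_intr() when no handler is pending.
 */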
static void mg_unexpected_intr(struct mg_host *host)
{
	u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);

	mg_dump_status("mg_unexpected_intr", status, host);
}

static irqreturn_t mg_irq(int irq, void *dev_id)
{
	struct mg_host *host = dev_id;
	void (*handler)(struct mg_host *) = host->mg_do_intr;

	spin_lock(&host->lock);

	host->mg_do_intr = NULL;
	del_timer(&host->timer);
	if (!handler)
		handler = mg_unexpected_intr;
	handler(host);

	spin_unlock(&host->lock);

	return IRQ_HANDLED;
}

/* local copy of ata_id_string() */
static void mg_id_string(const u16 *id, unsigned char *s,
			 unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/* local copy of ata_id_c_string() */
static void mg_id_c_string(const u16 *id, unsigned char *s,
			   unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	mg_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

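/*
 * mg_get_disk_id(): issue MG_CMD_ID, read the 512-byte identify page from
 * the buffer window into host->id, and derive the CHS geometry and LBA
 * capacity.  When MG_RES_SEC is non-zero the reserved sectors are hidden
 * from the usable capacity.
 */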
static int mg_get_disk_id(struct mg_host *host)
{
	u32 i;
	s32 err;
	const u16 *id = host->id;
	struct mg_drv_data *prv_data = host->dev->platform_data;
	char fwrev[ATA_ID_FW_REV_LEN + 1];
	char model[ATA_ID_PROD_LEN + 1];
	char serial[ATA_ID_SERNO_LEN + 1];

	if (!prv_data->use_polling)
		outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
	if (err)
		return err;

	for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++)
		host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base +
					      MG_BUFF_OFFSET + i * 2));

	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD);
	if (err)
		return err;

	if ((id[ATA_ID_FIELD_VALID] & 1) == 0)
		return MG_ERR_TRANSLATION;

	host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	host->cyls = id[ATA_ID_CYLS];
	host->heads = id[ATA_ID_HEADS];
	host->sectors = id[ATA_ID_SECTORS];

	if (MG_RES_SEC && host->heads && host->sectors) {
		/* modify cyls, n_sectors */
		host->cyls = (host->n_sectors - MG_RES_SEC) /
			host->heads / host->sectors;
		host->nres_sectors = host->n_sectors - host->cyls *
			host->heads * host->sectors;
		host->n_sectors -= host->nres_sectors;
	}

	mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
	mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
	printk(KERN_INFO "mg_disk: model: %s\n", model);
	printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
	printk(KERN_INFO "mg_disk: serial: %s\n", serial);
	printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n",
	       host->n_sectors, host->nres_sectors);

	if (!prv_data->use_polling)
		outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	return err;
}

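/*
 * mg_disk_init(): full reset sequence - pulse the hardware reset GPIO,
 * then toggle ATA_SRST for a soft reset, waiting for the expected
 * BUSY/READY transitions at each step and finally checking the low
 * status nibble.
 */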
static int mg_disk_init(struct mg_host *host)
{
	struct mg_drv_data *prv_data = host->dev->platform_data;
	s32 err;
	u8 init_status;

	/* hdd rst low */
	gpio_set_value(host->rst, 0);
	err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* hdd rst high */
	gpio_set_value(host->rst, 1);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
	if (err)
		return err;

	/* soft reset on */
	outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
	if (err)
		return err;

	/* soft reset off */
	outb(prv_data->use_polling ? ATA_NIEN : 0,
			(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
	err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
	if (err)
		return err;

	init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;

	if (init_status == 0xf)
		return MG_ERR_INIT_STAT;

	return err;
}

static void mg_bad_rw_intr(struct mg_host *host)
{
	if (host->req)
		if (++host->req->errors >= MG_MAX_ERRORS ||
		    host->error == MG_ERR_TIMEOUT)
			mg_end_request_cur(host, -EIO);
}

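/*
 * mg_out(): program the task-file registers (sector count/number, LBA
 * bytes, drive/head) and issue 'cmd'.  In interrupt mode the completion
 * handler and a 3 second watchdog timer are armed first.
 */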
static unsigned int mg_out(struct mg_host *host,
			   unsigned int sect_num,
			   unsigned int sect_cnt,
			   unsigned int cmd,
			   void (*intr_addr)(struct mg_host *))
{
	struct mg_drv_data *prv_data = host->dev->platform_data;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return host->error;

	if (!prv_data->use_polling) {
		host->mg_do_intr = intr_addr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}
	if (MG_RES_SEC)
		sect_num += MG_RES_SEC;
	outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
	outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
	outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
			MG_REG_CYL_LOW);
	outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
			MG_REG_CYL_HIGH);
	outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
			(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
	outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
	return MG_ERR_NONE;
}

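/*
 * Polled PIO transfer paths, used when prv_data->use_polling is set:
 * one sector is moved through the buffer window per DRQ phase and
 * confirmed with MG_CMD_RD_CONF/MG_CMD_WR_CONF.
 */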
static void mg_read(struct request *req)
{
	u32 j;
	struct mg_host *host = req->rq_disk->private_data;

	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
		   MG_CMD_RD, NULL) != MG_ERR_NONE)
		mg_bad_rw_intr(host);

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);

	do {
		u16 *buff = (u16 *)req->buffer;

		if (mg_wait(host, ATA_DRQ,
			    MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
			*buff++ = inw((unsigned long)host->dev_base +
				      MG_BUFF_OFFSET + (j << 1));

		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}

static void mg_write(struct request *req)
{
	u32 j;
	struct mg_host *host = req->rq_disk->private_data;

	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
		   MG_CMD_WR, NULL) != MG_ERR_NONE) {
		mg_bad_rw_intr(host);
		return;
	}

	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);

	do {
		u16 *buff = (u16 *)req->buffer;

		if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
			outw(*buff++, (unsigned long)host->dev_base +
			     MG_BUFF_OFFSET + (j << 1));

		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}

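/*
 * Interrupt-mode completion handlers: each invocation transfers one
 * sector through the buffer window, re-arms itself while the request
 * still has sectors remaining, and otherwise restarts the request queue.
 */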
static void mg_read_intr(struct mg_host *host)
{
	struct request *req = host->req;
	u32 i;
	u16 *buff;

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & ATA_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if (i & ATA_DRQ)
			goto ok_to_read;
	} while (0);
	mg_dump_status("mg_read_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_read:
	/* get current segment of request */
	buff = (u16 *)req->buffer;

	/* read 1 sector */
	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
			      (i << 1));

	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
	       blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);

	/* send read confirm */
	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
		/* set handler if read remains */
		host->mg_do_intr = mg_read_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	} else /* goto next request */
		mg_request(host->breq);
}

static void mg_write_intr(struct mg_host *host)
{
	struct request *req = host->req;
	u32 i, j;
	u16 *buff;
	bool rem;

	/* check status */
	do {
		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
		if (i & ATA_BUSY)
			break;
		if (!MG_READY_OK(i))
			break;
		if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
			goto ok_to_write;
	} while (0);
	mg_dump_status("mg_write_intr", i, host);
	mg_bad_rw_intr(host);
	mg_request(host->breq);
	return;

ok_to_write:
	if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
		/* write 1 sector and set handler if remains */
		buff = (u16 *)req->buffer;
		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
			outw(*buff, (unsigned long)host->dev_base +
			     MG_BUFF_OFFSET + (j << 1));
			buff++;
		}
		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
		       blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
		host->mg_do_intr = mg_write_intr;
		mod_timer(&host->timer, jiffies + 3 * HZ);
	}

	/* send write confirm */
	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);

	if (!rem)
		mg_request(host->breq);
}

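/*
 * Watchdog for interrupt mode: if no completion interrupt arrives within
 * the 3 second window armed by mg_out(), fail the current chunk and kick
 * the queue again.
 */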
void mg_times_out(unsigned long data)
{
	struct mg_host *host = (struct mg_host *)data;
	char *name;

	spin_lock_irq(&host->lock);

	if (!host->req)
		goto out_unlock;

	host->mg_do_intr = NULL;

	name = host->req->rq_disk->disk_name;
	printk(KERN_DEBUG "%s: timeout\n", name);

	host->error = MG_ERR_TIMEOUT;
	mg_bad_rw_intr(host);

out_unlock:
	mg_request(host->breq);
	spin_unlock_irq(&host->lock);
}

static void mg_request_poll(struct request_queue *q)
{
	struct mg_host *host = q->queuedata;

	while (1) {
		if (!host->req) {
			host->req = blk_fetch_request(q);
			if (!host->req)
				break;
		}

		if (unlikely(!blk_fs_request(host->req))) {
			mg_end_request_cur(host, -EIO);
			continue;
		}

		if (rq_data_dir(host->req) == READ)
			mg_read(host->req);
		else
			mg_write(host->req);
	}
}

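/*
 * mg_issue_req(): start one request in interrupt mode.  Reads are handed
 * entirely to mg_read_intr(); writes prime the first sector by PIO (with
 * the interrupt masked via ATA_NIEN while the command is set up) before
 * mg_write_intr() takes over.
 */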
static unsigned int mg_issue_req(struct request *req,
		struct mg_host *host,
		unsigned int sect_num,
		unsigned int sect_cnt)
{
	u16 *buff;
	u32 i;

	switch (rq_data_dir(req)) {
	case READ:
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		break;
	case WRITE:
		/* TODO : handler */
		outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
		if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
				!= MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		del_timer(&host->timer);
		mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
		outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
		if (host->error) {
			mg_bad_rw_intr(host);
			return host->error;
		}
		buff = (u16 *)req->buffer;
		for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
			outw(*buff, (unsigned long)host->dev_base +
			     MG_BUFF_OFFSET + (i << 1));
			buff++;
		}
		mod_timer(&host->timer, jiffies + 3 * HZ);
		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
				MG_REG_COMMAND);
		break;
	}
	return MG_ERR_NONE;
}

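/*
 * mg_request(): request_fn for interrupt mode.  It refuses to start a new
 * command while a completion handler is still pending and sanity-checks
 * the requested sector range against the disk capacity.
 */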
/* This function also called from IRQ context */
static void mg_request(struct request_queue *q)
{
	struct mg_host *host = q->queuedata;
	struct request *req;
	u32 sect_num, sect_cnt;

	while (1) {
		if (!host->req) {
			host->req = blk_fetch_request(q);
			if (!host->req)
				break;
		}
		req = host->req;

		/* check unwanted request call */
		if (host->mg_do_intr)
			return;

		del_timer(&host->timer);

		sect_num = blk_rq_pos(req);
		/* deal whole segments */
		sect_cnt = blk_rq_sectors(req);

		/* sanity check */
		if (sect_num >= get_capacity(req->rq_disk) ||
		    ((sect_num + sect_cnt) >
		     get_capacity(req->rq_disk))) {
			printk(KERN_WARNING
			       "%s: bad access: sector=%d, count=%d\n",
			       req->rq_disk->disk_name,
			       sect_num, sect_cnt);
			mg_end_request_cur(host, -EIO);
			continue;
		}

		if (unlikely(!blk_fs_request(req))) {
			mg_end_request_cur(host, -EIO);
			continue;
		}

		if (!mg_issue_req(req, host, sect_num, sect_cnt))
			return;
	}
}

static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mg_host *host = bdev->bd_disk->private_data;

	geo->cylinders = (unsigned short)host->cyls;
	geo->heads = (unsigned char)host->heads;
	geo->sectors = (unsigned char)host->sectors;
	return 0;
}

static struct block_device_operations mg_disk_ops = {
	.getgeo = mg_getgeo
};

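/*
 * Power management: suspend puts the mflash into deep sleep with
 * MG_CMD_SLEEP, resume wakes it with MG_CMD_WAKEUP; both wait for the
 * device to report ready again.
 */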
static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash deep sleep */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
		if (!prv_data->use_polling)
			outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
		return -EIO;
	}

	return 0;
}

static int mg_resume(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
	/* wait until mflash wakeup */
	msleep(1);

	if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
		return -EIO;

	if (!prv_data->use_polling)
		outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);

	return 0;
}

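/*
 * mg_probe() expects the platform device to provide struct mg_drv_data as
 * platform_data, one IORESOURCE_MEM entry covering the register window,
 * IORESOURCE_IO entries named MG_RST_PIN (and MG_RSTOUT_PIN unless the
 * device is a boot device), and an IRQ unless use_polling is set.
 *
 * A minimal board-file sketch (illustrative only; the base address, GPIO
 * numbers and IRQ below are made-up placeholders):
 *
 *	static struct mg_drv_data mg_data = {
 *		.use_polling	= 0,
 *		.dev_attr	= MG_STORAGE_DEV,
 *	};
 *
 *	static struct resource mg_res[] = {
 *		{ .start = 0x30000000, .end = 0x3000ffff,
 *		  .flags = IORESOURCE_MEM },
 *		{ .name = MG_RST_PIN, .start = 43, .end = 43,
 *		  .flags = IORESOURCE_IO },
 *		{ .name = MG_RSTOUT_PIN, .start = 44, .end = 44,
 *		  .flags = IORESOURCE_IO },
 *		{ .start = 32, .end = 32, .flags = IORESOURCE_IRQ },
 *	};
 *
 *	static struct platform_device mg_dev = {
 *		.name		= MG_DEV_NAME,
 *		.resource	= mg_res,
 *		.num_resources	= ARRAY_SIZE(mg_res),
 *		.dev		= { .platform_data = &mg_data },
 *	};
 */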
static int mg_probe(struct platform_device *plat_dev)
{
	struct mg_host *host;
	struct resource *rsc;
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	int err = 0;

	if (!prv_data) {
		printk(KERN_ERR "%s:%d fail (no driver_data)\n",
		       __func__, __LINE__);
		err = -EINVAL;
		goto probe_err;
	}

	/* alloc mg_host */
	host = kzalloc(sizeof(struct mg_host), GFP_KERNEL);
	if (!host) {
		printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n",
		       __func__, __LINE__);
		err = -ENOMEM;
		goto probe_err;
	}
	host->major = MG_DISK_MAJ;

	/* link each other */
	prv_data->host = host;
	host->dev = &plat_dev->dev;

	/* io remap */
	rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!rsc) {
		printk(KERN_ERR "%s:%d platform_get_resource fail\n",
		       __func__, __LINE__);
		err = -EINVAL;
		goto probe_err_2;
	}
	host->dev_base = ioremap(rsc->start, rsc->end + 1);
	if (!host->dev_base) {
		printk(KERN_ERR "%s:%d ioremap fail\n",
		       __func__, __LINE__);
		err = -EIO;
		goto probe_err_2;
	}
	MG_DBG("dev_base = 0x%x\n", (u32)host->dev_base);

	/* get reset pin */
	rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
			MG_RST_PIN);
	if (!rsc) {
		printk(KERN_ERR "%s:%d get reset pin fail\n",
		       __func__, __LINE__);
		err = -EIO;
		goto probe_err_3;
	}
	host->rst = rsc->start;

	/* init rst pin */
	err = gpio_request(host->rst, MG_RST_PIN);
	if (err)
		goto probe_err_3;
	gpio_direction_output(host->rst, 1);

	/* reset out pin */
	if (!(prv_data->dev_attr & MG_DEV_MASK))
		goto probe_err_3a;

	if (prv_data->dev_attr != MG_BOOT_DEV) {
		rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
				MG_RSTOUT_PIN);
		if (!rsc) {
			printk(KERN_ERR "%s:%d get reset-out pin fail\n",
			       __func__, __LINE__);
			err = -EIO;
			goto probe_err_3a;
		}
		host->rstout = rsc->start;
		err = gpio_request(host->rstout, MG_RSTOUT_PIN);
		if (err)
			goto probe_err_3a;
		gpio_direction_input(host->rstout);
	}

	/* disk reset */
	if (prv_data->dev_attr == MG_STORAGE_DEV) {
		/* If POR seq. not yet finised, wait */
		err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
		if (err)
			goto probe_err_3b;
		err = mg_disk_init(host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (err code : %d)\n",
			       __func__, __LINE__, err);
			err = -EIO;
			goto probe_err_3b;
		}
	}

	/* get irq resource */
	if (!prv_data->use_polling) {
		host->irq = platform_get_irq(plat_dev, 0);
		if (host->irq == -ENXIO) {
			err = host->irq;
			goto probe_err_3b;
		}
		err = request_irq(host->irq, mg_irq,
				IRQF_DISABLED | IRQF_TRIGGER_RISING,
				MG_DEV_NAME, host);
		if (err) {
			printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
			       __func__, __LINE__, err);
			goto probe_err_3b;
		}
	}

	/* get disk id */
	err = mg_get_disk_id(host);
	if (err) {
		printk(KERN_ERR "%s:%d fail (err code : %d)\n",
		       __func__, __LINE__, err);
		err = -EIO;
		goto probe_err_4;
	}

	err = register_blkdev(host->major, MG_DISK_NAME);
	if (err < 0) {
		printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n",
		       __func__, __LINE__, err);
		goto probe_err_4;
	}
	if (!host->major)
		host->major = err;

	spin_lock_init(&host->lock);

	if (prv_data->use_polling)
		host->breq = blk_init_queue(mg_request_poll, &host->lock);
	else
		host->breq = blk_init_queue(mg_request, &host->lock);

	if (!host->breq) {
		err = -ENOMEM;
		printk(KERN_ERR "%s:%d (blk_init_queue) fail\n",
		       __func__, __LINE__);
		goto probe_err_5;
	}
	host->breq->queuedata = host;

	/* mflash is random device, thanx for the noop */
	elevator_exit(host->breq->elevator);
	err = elevator_init(host->breq, "noop");
	if (err) {
		printk(KERN_ERR "%s:%d (elevator_init) fail\n",
		       __func__, __LINE__);
		goto probe_err_6;
	}
	blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
	blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);

	init_timer(&host->timer);
	host->timer.function = mg_times_out;
	host->timer.data = (unsigned long)host;

	host->gd = alloc_disk(MG_DISK_MAX_PART);
	if (!host->gd) {
		printk(KERN_ERR "%s:%d (alloc_disk) fail\n",
		       __func__, __LINE__);
		err = -ENOMEM;
		goto probe_err_7;
	}
	host->gd->major = host->major;
	host->gd->first_minor = 0;
	host->gd->fops = &mg_disk_ops;
	host->gd->queue = host->breq;
	host->gd->private_data = host;
	sprintf(host->gd->disk_name, MG_DISK_NAME"a");

	set_capacity(host->gd, host->n_sectors);

	add_disk(host->gd);

	return err;

probe_err_7:
	del_timer_sync(&host->timer);
probe_err_6:
	blk_cleanup_queue(host->breq);
probe_err_5:
	unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME);
probe_err_4:
	if (!prv_data->use_polling)
		free_irq(host->irq, host);
probe_err_3b:
	gpio_free(host->rstout);
probe_err_3a:
	gpio_free(host->rst);
probe_err_3:
	iounmap(host->dev_base);
probe_err_2:
	kfree(host);
probe_err:
	return err;
}

static int mg_remove(struct platform_device *plat_dev)
{
	struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
	struct mg_host *host = prv_data->host;
	int err = 0;

	/* delete timer */
	del_timer_sync(&host->timer);

	/* remove disk */
	if (host->gd) {
		del_gendisk(host->gd);
		put_disk(host->gd);
	}
	/* remove queue */
	if (host->breq)
		blk_cleanup_queue(host->breq);

	/* unregister blk device */
	unregister_blkdev(host->major, MG_DISK_NAME);

	/* free irq */
	if (!prv_data->use_polling)
		free_irq(host->irq, host);

	/* free reset-out pin */
	if (prv_data->dev_attr != MG_BOOT_DEV)
		gpio_free(host->rstout);

	/* free rst pin */
	if (host->rst)
		gpio_free(host->rst);

	/* unmap io */
	if (host->dev_base)
		iounmap(host->dev_base);

	/* free mg_host */
	kfree(host);

	return err;
}

static struct platform_driver mg_disk_driver = {
	.probe = mg_probe,
	.remove = mg_remove,
	.suspend = mg_suspend,
	.resume = mg_resume,
	.driver = {
		.name = MG_DEV_NAME,
		.owner = THIS_MODULE,
	}
};

/****************************************************************************
 *
 * Module stuff
 *
 ****************************************************************************/

static int __init mg_init(void)
{
	printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n");
	return platform_driver_register(&mg_disk_driver);
}

static void __exit mg_exit(void)
{
	printk(KERN_INFO "mflash driver : bye bye\n");
	platform_driver_unregister(&mg_disk_driver);
}

module_init(mg_init);
module_exit(mg_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
MODULE_DESCRIPTION("mGine m[g]flash device driver");