/* drivers/mtd/lpddr/lpddr_cmds.c */
/*
 * LPDDR flash memory device operations. This module provides read, write,
 * erase, lock/unlock support for LPDDR flash memories
 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
 * Many thanks to Roman Borisov for initial enabling
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 * TODO:
 * Implement VPP management
 * Implement XIP support
 * Implement OTP support
 */
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
#include <linux/slab.h>
#include <linux/module.h>

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf);
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
			size_t len, size_t *retlen, const u_char *buf);
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys);
static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);

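/*
 * lpddr_cmdset - set up the MTD interface for an LPDDR (PFOW) device.
 * Allocates and fills an mtd_info with the LPDDR operations, sizes it from
 * the qinfo records, and initialises one flchip per hardware partition;
 * partitions of the same die share a flchip_shared structure used for
 * write/erase arbitration.
 */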
struct mtd_info *lpddr_cmdset(struct map_info *map)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	struct flchip_shared *shared;
	struct flchip *chip;
	struct mtd_info *mtd;
	int numchips;
	int i, j;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->read = lpddr_read;
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->flags &= ~MTD_BIT_WRITEABLE;
	mtd->erase = lpddr_erase;
	mtd->write = lpddr_write_buffers;
	mtd->writev = lpddr_writev;
	mtd->read_oob = NULL;
	mtd->write_oob = NULL;
	mtd->sync = NULL;
	mtd->lock = lpddr_lock;
	mtd->unlock = lpddr_unlock;
	mtd->suspend = NULL;
	mtd->resume = NULL;
	if (map_is_linear(map)) {
		mtd->point = lpddr_point;
		mtd->unpoint = lpddr_unpoint;
	}
	mtd->block_isbad = NULL;
	mtd->block_markbad = NULL;
	mtd->size = 1 << lpddr->qinfo->DevSizeShift;
	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
	mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;

	shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
						GFP_KERNEL);
	if (!shared) {
		kfree(lpddr);
		kfree(mtd);
		return NULL;
	}

	chip = &lpddr->chips[0];
	numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
	for (i = 0; i < numchips; i++) {
		shared[i].writing = shared[i].erasing = NULL;
		mutex_init(&shared[i].lock);
		for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
			*chip = lpddr->chips[i];
			chip->start += j << lpddr->chipshift;
			chip->oldstate = chip->state = FL_READY;
			chip->priv = &shared[i];
			/* those should be reset too since
			   they create memory references. */
			init_waitqueue_head(&chip->wq);
			mutex_init(&chip->mutex);
			chip++;
		}
	}

	return mtd;
}
EXPORT_SYMBOL(lpddr_cmdset);

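/*
 * Poll the Device Status Register until the chip reports ready or the
 * timeout (8x the expected operation time) expires.  Drops chip->mutex
 * while waiting, sleeps when the remaining delay is long enough, and
 * restarts the timeout if the operation was suspended in the meantime.
 * Returns 0 on success, -ETIME on timeout or -EIO on a DSR error.
 */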
static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d\n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK, still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended) {
			/* A suspend occurred while we slept: reset the timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR */
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING "%s: Bad status on wait: 0x%x\n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}

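/*
 * Acquire the chip for the requested operation mode.  Must be called with
 * chip->mutex held.  For write/erase, the per-die flchip_shared state is
 * used to arbitrate with the other partitions: a contending partition is
 * asked (via chip_ready) to suspend, or we sleep until it finishes.
 * Returns 0 once the caller owns the chip for this operation.
 */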
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have a possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition. So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform the desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it. If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep. In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own the chip if it is already in
			 * FL_SYNCING state. Put the contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we have a suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
			&& shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

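/*
 * Check whether the chip can accept the requested operation in its current
 * state.  Suspends an in-progress erase when the hardware supports it and
 * the caller only wants to read/point; otherwise the caller is put to sleep
 * on chip->wq and -EAGAIN is returned so that get_chip() retries.
 */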
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops, something went wrong. */
			/* Resume and pretend we weren't here. */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed."
					" State may be wrong\n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;
		/* Erase suspend */
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}

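/*
 * Release the chip after an operation.  Hands write ownership back to a
 * partition whose erase was suspended, resumes a suspended erase when we
 * still own it, and wakes up anyone sleeping on chip->wq.
 */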
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch (chip->oldstate) {
	case FL_ERASING:
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

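/*
 * Program one chunk (at most one program buffer) of data taken from the
 * kvec array.  Unaligned leading/trailing bytes are padded with 0xFF,
 * which leaves the corresponding flash cells untouched, the data is copied
 * into the device's program buffer and a Buffered Program command is issued.
 */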
int do_write_buffer(struct map_info *map, struct flchip *chip,
			unsigned long adr, const struct kvec **pvec,
			unsigned long *pvec_seek, int len)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	map_word datum;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long prog_buf_ofs;

	wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
	/* Write data */
	/* Get the program buffer offset from PFOW register data first */
	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;

		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && (len < map_bankwidth(map)))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
				vec->iov_base + vec_seek, word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, prog_buf_ofs);
			prog_buf_ofs += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
	chip->state = FL_WRITING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
	if (ret) {
		printk(KERN_WARNING "%s Buffer program error: %d at %lx\n",
			map->name, ret, adr);
		goto out;
	}

out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

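/* Erase a single uniform block at the given address. */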
int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
	chip->state = FL_ERASING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
	if (ret) {
		printk(KERN_WARNING "%s Erase block error %d at : %llx\n",
			map->name, ret, adr);
		goto out;
	}
out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	map_copy_from(map, buf, adr, len);
	*retlen = len;

	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

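/*
 * point()/unpoint() give callers direct access to the memory-mapped flash
 * instead of copying through a buffer; they are only hooked up when the map
 * is linear.  Each successful point takes a reference on the chip which
 * lpddr_unpoint() drops again.
 */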
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
		size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	if (!map->virt || (adr + len > mtd->size))
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	*mtdbuf = (void *)map->virt + chip->start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		mutex_unlock(&chip->mutex);
		if (ret)
			break;

		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}

static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &lpddr->chips[chipnum];
		if (chipnum >= lpddr->numchips)
			break;

		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_WARNING "%s: Warning: unpoint called on non-"
					"pointed region\n", map->name);

		put_chip(map, chip);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return lpddr_writev(mtd, &vec, 1, to, retlen);
}

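/*
 * Write a scatter/gather list.  The request is cut into chunks that do not
 * cross program-buffer boundaries and each chunk is handed to
 * do_write_buffer().
 */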
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> lpddr->chipshift;

	ofs = to;
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &lpddr->chips[chipnum],
					  ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		/* Be nice and reschedule with the chip in a usable
		 * state for other processes */
		cond_resched();

	} while (len);

	return 0;
}

static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;

	ofs = instr->addr;
	len = instr->len;

	if (ofs > mtd->size || (len + ofs) > mtd->size)
		return -EINVAL;

	while (len > 0) {
		ret = do_erase_oneblock(mtd, ofs);
		if (ret)
			return ret;
		ofs += size;
		len -= size;
	}
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

#define DO_XXLOCK_LOCK		1
#define DO_XXLOCK_UNLOCK	2
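/*
 * Common helper for lock/unlock: issues the LPDDR lock or unlock command
 * for the requested range and waits for the chip to report ready.
 */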
int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
	int ret = 0;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (thunk == DO_XXLOCK_LOCK) {
		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_UNLOCK) {
		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	ret = wait_for_ready(map, chip, 1);
	if (ret) {
		printk(KERN_ERR "%s: block lock/unlock error status %d\n",
				map->name, ret);
		goto out;
	}
out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}

static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}

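/*
 * Program a single word at the given address using the PFOW Word Program
 * command.  Not wired into the mtd_info operations set up by
 * lpddr_cmdset(); available to callers that need word granularity.
 */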
int word_program(struct map_info *map, loff_t adr, uint32_t curval)
{
	int ret;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);

	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
	if (ret) {
		printk(KERN_WARNING "%s word_program error at: %llx; val: %x\n",
			map->name, adr, curval);
		goto out;
	}

out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");