drivers/lightnvm/sysblk.c
/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/lightnvm.h>

#define MAX_SYSBLKS 3	/* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
			      * enables ~1.5M updates per sysblk unit
			      */
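
/*
 * Worked example for the budget quoted above (illustrative numbers from
 * the comment, not device-specific): 2 blks * 256 pages * 3000 erase
 * cycles = 1,536,000 ~= 1.5M sysblk updates per unit before wear-out.
 */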

struct sysblk_scan {
	/* A row is a collection of flash blocks for a system block. */
	int nr_rows;
	int row;
	int act_blk[MAX_SYSBLKS];

	int nr_ppas;
	struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
};

static inline int scan_ppa_idx(int row, int blkid)
{
	return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}
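
/*
 * Example: with MAX_SYSBLKS = 3 and MAX_BLKS_PR_SYSBLK = 2 the ppas[]
 * array is indexed row-major:
 *
 *	row 0: ppas[0], ppas[1]
 *	row 1: ppas[2], ppas[3]
 *	row 2: ppas[4], ppas[5]
 *
 * so scan_ppa_idx(1, 1) == (1 * 2) + 1 == 3.
 */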

void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
{
	info->seqnr = be32_to_cpu(sb->seqnr);
	info->erase_cnt = be32_to_cpu(sb->erase_cnt);
	info->version = be16_to_cpu(sb->version);
	strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
	info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}

void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
{
	sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
	sb->seqnr = cpu_to_be32(info->seqnr);
	sb->erase_cnt = cpu_to_be32(info->erase_cnt);
	sb->version = cpu_to_be16(info->version);
	strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
	sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
}

static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
	int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
	int i;

	for (i = 0; i < nr_rows; i++)
		sysblk_ppas[i].ppa = 0;

	/* if possible, place sysblk at first channel, middle channel and last
	 * channel of the device. If not, create only one or two sys blocks
	 */
	switch (dev->nr_chnls) {
	case 2:
		sysblk_ppas[1].g.ch = 1;
		/* fall-through */
	case 1:
		sysblk_ppas[0].g.ch = 0;
		break;
	default:
		sysblk_ppas[0].g.ch = 0;
		sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
		sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
		break;
	}

	return nr_rows;
}
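
/*
 * Example: on a device with dev->nr_chnls == 8 the default case places
 * the three sysblk rows on channels 0, 8 / 2 == 4 and 8 - 1 == 7, i.e.
 * the first, middle and last channel of the device.
 */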

void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
			   struct ppa_addr *sysblk_ppas)
{
	memset(s, 0, sizeof(struct sysblk_scan));
	s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}

static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
				void *private)
{
	struct sysblk_scan *s = private;
	int i, nr_sysblk = 0;

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] != NVM_BLK_T_HOST)
			continue;

		if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
			pr_err("nvm: too many host blks\n");
			return -EINVAL;
		}

		ppa.g.blk = i;

		s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
		s->nr_ppas++;
		nr_sysblk++;
	}

	return 0;
}

static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
			       struct ppa_addr *ppas, nvm_bb_update_fn *fn)
{
	struct ppa_addr dppa;
	int i, ret;

	s->nr_ppas = 0;

	for (i = 0; i < s->nr_rows; i++) {
		dppa = generic_to_dev_addr(dev, ppas[i]);
		s->row = i;

		ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
		if (ret) {
			pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
			       ppas[i].g.ch,
			       ppas[i].g.blk);
			return ret;
		}
	}

	return ret;
}

/*
 * scans a block for latest sysblk.
 * Returns:
 *	0 - newer sysblk not found. PPA is updated to latest page.
 *	1 - newer sysblk found and stored in *cur. PPA is updated to
 *	    next valid page.
 *	<0 - error.
 */
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
			  struct nvm_system_block *sblk)
{
	struct nvm_system_block *cur;
	int pg, cursz, ret, found = 0;

	/* the full buffer for a flash page is allocated. Only the first
	 * part of it contains the system block information
	 */
	cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
	cur = kmalloc(cursz, GFP_KERNEL);
	if (!cur)
		return -ENOMEM;

	/* perform linear scan through the block */
	for (pg = 0; pg < dev->lps_per_blk; pg++) {
		ppa->g.pg = ppa_to_slc(dev, pg);

		ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
				     cur, cursz);
		if (ret) {
			if (ret == NVM_RSP_ERR_EMPTYPAGE) {
				pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
					 ppa->g.ch,
					 ppa->g.lun,
					 ppa->g.blk,
					 ppa->g.pg);
				break;
			}
			pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)\n",
			       ret,
			       ppa->g.ch,
			       ppa->g.lun,
			       ppa->g.blk,
			       ppa->g.pg);
			break; /* if we can't read a page, continue to the
				* next blk
				*/
		}

		if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
			pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
				 ppa->g.ch,
				 ppa->g.lun,
				 ppa->g.blk,
				 ppa->g.pg);
			break; /* last valid page already found */
		}

		if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
			continue;

		memcpy(sblk, cur, sizeof(struct nvm_system_block));
		found = 1;
	}

	kfree(cur);

	return found;
}
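
/*
 * Typical caller pattern (see nvm_get_sysblock below): a positive return
 * means a newer sysblk was copied into *sblk, zero means the scan was
 * exhausted without finding a newer one, and a negative value is an error
 * that aborts the row scan:
 *
 *	ret = nvm_scan_block(dev, &ppa, cur);
 *	if (ret > 0)
 *		found = 1;
 *	else if (ret < 0)
 *		break;
 */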

static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
{
	struct nvm_rq rqd;
	int ret;

	if (s->nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all sysblocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd, type);
	nvm_free_rqd_ppalist(dev, &rqd);
	if (ret) {
		pr_err("nvm: sysblk failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}

static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
				void *private)
{
	struct sysblk_scan *s = private;
	struct ppa_addr *sppa;
	int i, blkid = 0;

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == NVM_BLK_T_HOST)
			return -EEXIST;

		if (blks[i] != NVM_BLK_T_FREE)
			continue;

		sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
		sppa->g.ch = ppa.g.ch;
		sppa->g.lun = ppa.g.lun;
		sppa->g.blk = i;
		s->nr_ppas++;
		blkid++;

		pr_debug("nvm: use (%u %u %u) as sysblk\n",
			 sppa->g.ch, sppa->g.lun, sppa->g.blk);
		if (blkid > MAX_BLKS_PR_SYSBLK - 1)
			return 0;
	}

	pr_err("nvm: sysblk failed get sysblk\n");
	return -EINVAL;
}

static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
				struct sysblk_scan *s)
{
	struct nvm_system_block nvmsb;
	void *buf;
	int i, sect, ret, bufsz;
	struct ppa_addr *ppas;

	nvm_cpu_to_sysblk(&nvmsb, info);

	/* buffer for flash page */
	bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));

	ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!ppas) {
		ret = -ENOMEM;
		goto err;
	}

	/* Write and verify */
	for (i = 0; i < s->nr_rows; i++) {
		ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];

		pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
			 ppas[0].g.ch,
			 ppas[0].g.lun,
			 ppas[0].g.blk,
			 ppas[0].g.pg);

		/* Expand to all sectors within a flash page */
		if (dev->sec_per_pg > 1) {
			for (sect = 1; sect < dev->sec_per_pg; sect++) {
				ppas[sect].ppa = ppas[0].ppa;
				ppas[sect].g.sec = sect;
			}
		}

		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
				     NVM_IO_SLC_MODE, buf, bufsz);
		if (ret) {
			pr_err("nvm: sysblk failed program (%u %u %u)\n",
			       ppas[0].g.ch,
			       ppas[0].g.lun,
			       ppas[0].g.blk);
			break;
		}

		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
				     NVM_IO_SLC_MODE, buf, bufsz);
		if (ret) {
			pr_err("nvm: sysblk failed read (%u %u %u)\n",
			       ppas[0].g.ch,
			       ppas[0].g.lun,
			       ppas[0].g.blk);
			break;
		}

		if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
			pr_err("nvm: sysblk failed verify (%u %u %u)\n",
			       ppas[0].g.ch,
			       ppas[0].g.lun,
			       ppas[0].g.blk);
			ret = -EINVAL;
			break;
		}
	}

	kfree(ppas);
err:
	kfree(buf);

	return ret;
}

static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
{
	int i, ret;
	unsigned long nxt_blk;
	struct ppa_addr *ppa;

	for (i = 0; i < s->nr_rows; i++) {
		nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
		ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
		ppa->g.pg = ppa_to_slc(dev, 0);

		ret = nvm_erase_ppa(dev, ppa, 1);
		if (ret)
			return ret;

		s->act_blk[i] = nxt_blk;
	}

	return 0;
}
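
/*
 * Example: with MAX_BLKS_PR_SYSBLK == 2 the active block per row simply
 * alternates, since (0 + 1) % 2 == 1 and (1 + 1) % 2 == 0, so each row
 * ping-pongs between its two reserved blocks as they fill up.
 */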

int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	struct nvm_system_block *cur;
	int i, j, found = 0;
	int ret = -ENOMEM;

	/*
	 * 1. setup sysblk locations
	 * 2. get bad block list
	 * 3. filter on host-specific (type 3)
	 * 4. iterate through all and find the highest seq nr.
	 * 5. return superblock information
	 */

	if (!dev->ops->get_bb_tbl)
		return -EINVAL;

	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

	mutex_lock(&dev->mlock);
	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
	if (ret)
		goto err_sysblk;

	/* no sysblocks initialized */
	if (!s.nr_ppas)
		goto err_sysblk;

	cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
	if (!cur) {
		ret = -ENOMEM;
		goto err_sysblk;
	}

	/* find the latest block across all sysblocks */
	for (i = 0; i < s.nr_rows; i++) {
		for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
			struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];

			ret = nvm_scan_block(dev, &ppa, cur);
			if (ret > 0)
				found = 1;
			else if (ret < 0)
				break;
		}
	}

	nvm_sysblk_to_cpu(info, cur);

	kfree(cur);
err_sysblk:
	mutex_unlock(&dev->mlock);

	if (found)
		return 1;
	return ret;
}

int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
{
	/* 1. for each latest superblock
	 * 2. if room
	 *    a. write new flash page entry with the updated information
	 * 3. if no room
	 *    a. find next available block on lun (linear search)
	 *       if none, continue to next lun
	 *       if none at all, report error. also report that it wasn't
	 *       possible to write to all superblocks.
	 *    b. write data to block.
	 */
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	struct nvm_system_block *cur;
	int i, j, ppaidx, found = 0;
	int ret = -ENOMEM;

	if (!dev->ops->get_bb_tbl)
		return -EINVAL;

	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

	mutex_lock(&dev->mlock);
	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
	if (ret)
		goto err_sysblk;

	cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
	if (!cur) {
		ret = -ENOMEM;
		goto err_sysblk;
	}

	/* Get the latest sysblk for each sysblk row */
	for (i = 0; i < s.nr_rows; i++) {
		found = 0;
		for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
			ppaidx = scan_ppa_idx(i, j);
			ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
			if (ret > 0) {
				s.act_blk[i] = j;
				found = 1;
			} else if (ret < 0)
				break;
		}
	}

	if (!found) {
		pr_err("nvm: no valid sysblks found to update\n");
		ret = -EINVAL;
		goto err_cur;
	}

	/*
	 * All sysblocks found. Check that they have the same page id in
	 * their flash blocks
	 */
	for (i = 1; i < s.nr_rows; i++) {
		struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
		struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];

		if (l.g.pg != r.g.pg) {
			pr_err("nvm: sysblks not on same page. Previous update failed.\n");
			ret = -EINVAL;
			goto err_cur;
		}
	}

	/*
	 * Check that there hasn't been another update to the seqnr since we
	 * began
	 */
	if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
		pr_err("nvm: seq is not sequential\n");
		ret = -EINVAL;
		goto err_cur;
	}

	/*
	 * When all pages in a block have been written, a new block is selected
	 * and writing is performed on the new block.
	 */
	if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
	    dev->lps_per_blk - 1) {
		ret = nvm_prepare_new_sysblks(dev, &s);
		if (ret)
			goto err_cur;
	}

	ret = nvm_write_and_verify(dev, new, &s);
err_cur:
	kfree(cur);
err_sysblk:
	mutex_unlock(&dev->mlock);

	return ret;
}

int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;
	int ret;

	/*
	 * 1. select master blocks and select first available blks
	 * 2. get bad block list
	 * 3. mark MAX_SYSBLKS blocks as host-based device allocated.
	 * 4. write and verify data to block
	 */

	if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
		return -EINVAL;

	if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
		pr_err("nvm: memory does not support SLC access\n");
		return -EINVAL;
	}

	/* Index all sysblocks and mark them as host-driven */
	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

	mutex_lock(&dev->mlock);
	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
	if (ret)
		goto err_mark;

	ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
	if (ret)
		goto err_mark;

	/* Write to the first block of each row */
	ret = nvm_write_and_verify(dev, info, &s);
err_mark:
	mutex_unlock(&dev->mlock);
	return ret;
}

struct factory_blks {
	struct nvm_dev *dev;
	int flags;
	unsigned long *blks;
};

static int factory_nblks(int nblks)
{
	/* Round up to nearest BITS_PER_LONG */
	return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
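
/*
 * Worked example (illustrative numbers): with BITS_PER_LONG == 64 and
 * dev->blks_per_lun == 1020, factory_nblks() returns
 * (1020 + 63) & ~63 == 1024, so each lun covers a whole number of longs
 * (1024 / 64 == 16) in the blks bitmap.
 */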

static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
{
	int nblks = factory_nblks(dev->blks_per_lun);

	return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
	       BITS_PER_LONG;
}
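
/*
 * Example (illustrative numbers): with 64-bit longs, 1024 bits per lun
 * (16 longs) and dev->luns_per_chnl == 4, factory_blk_offset(dev, 1, 2)
 * == ((1 * 4 * 1024) + (2 * 1024)) / 64 == 96, i.e. the bitmap for
 * (ch 1, lun 2) starts at f->blks[96].
 */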

static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
			    void *private)
{
	struct factory_blks *f = private;
	struct nvm_dev *dev = f->dev;
	int i, lunoff;

	lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);

	/* unset bits correspond to blocks that must be erased */
	for (i = 0; i < nr_blks; i++) {
		switch (blks[i]) {
		case NVM_BLK_T_FREE:
			if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
				set_bit(i, &f->blks[lunoff]);
			break;
		case NVM_BLK_T_HOST:
			if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
				set_bit(i, &f->blks[lunoff]);
			break;
		case NVM_BLK_T_GRWN_BAD:
			if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
				set_bit(i, &f->blks[lunoff]);
			break;
		default:
			set_bit(i, &f->blks[lunoff]);
			break;
		}
	}

	return 0;
}

static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
			     int max_ppas, struct factory_blks *f)
{
	struct ppa_addr ppa;
	int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
	unsigned long *offset;

	while (!done) {
		done = 1;
		for (ch = 0; ch < dev->nr_chnls; ch++) {
			for (lun = 0; lun < dev->luns_per_chnl; lun++) {
				idx = factory_blk_offset(dev, ch, lun);
				offset = &f->blks[idx];

				blkid = find_first_zero_bit(offset,
							    dev->blks_per_lun);
				if (blkid >= dev->blks_per_lun)
					continue;
				set_bit(blkid, offset);

				ppa.ppa = 0;
				ppa.g.ch = ch;
				ppa.g.lun = lun;
				ppa.g.blk = blkid;
				pr_debug("nvm: erase ppa (%u %u %u)\n",
					 ppa.g.ch,
					 ppa.g.lun,
					 ppa.g.blk);

				erase_list[ppa_cnt] = ppa;
				ppa_cnt++;
				done = 0;

				if (ppa_cnt == max_ppas)
					return ppa_cnt;
			}
		}
	}

	return ppa_cnt;
}

static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
			       nvm_bb_update_fn *fn, void *priv)
{
	struct ppa_addr dev_ppa;
	int ret;

	dev_ppa = generic_to_dev_addr(dev, ppa);

	ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
	if (ret)
		pr_err("nvm: failed bb tbl for ch%u lun%u\n",
		       ppa.g.ch, ppa.g.lun);
	return ret;
}

static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
{
	int ch, lun, ret;
	struct ppa_addr ppa;

	ppa.ppa = 0;
	for (ch = 0; ch < dev->nr_chnls; ch++) {
		for (lun = 0; lun < dev->luns_per_chnl; lun++) {
			ppa.g.ch = ch;
			ppa.g.lun = lun;

			ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
						  f);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
	struct factory_blks f;
	struct ppa_addr *ppas;
	int ppa_cnt, ret = -ENOMEM;
	int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
	struct sysblk_scan s;

	f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
			 GFP_KERNEL);
	if (!f.blks)
		return ret;

	ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!ppas)
		goto err_blks;

	f.dev = dev;
	f.flags = flags;

	/* create list of blks to be erased */
	ret = nvm_fact_select_blks(dev, &f);
	if (ret)
		goto err_ppas;

	/* continue to erase blks until the list is empty */
	while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
		nvm_erase_ppa(dev, ppas, ppa_cnt);

	/* mark host reserved blocks free */
	if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
		nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
		mutex_lock(&dev->mlock);
		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
					  sysblk_get_host_blks);
		if (!ret)
			ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
		mutex_unlock(&dev->mlock);
	}
err_ppas:
	kfree(ppas);
err_blks:
	kfree(f.blks);
	return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);