/* drivers/nvdimm/pmem.c (Linux 4.19-rc7) */
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"
static struct device *to_dev(struct pmem_device *pmem)
{
        /*
         * nvdimm bus services need a 'dev' parameter, and we record the device
         * at init in bb.dev.
         */
        return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
        return to_nd_region(to_dev(pmem)->parent);
}
static void hwpoison_clear(struct pmem_device *pmem,
                phys_addr_t phys, unsigned int len)
{
        unsigned long pfn_start, pfn_end, pfn;

        /* only pmem in the linear map supports HWPoison */
        if (is_vmalloc_addr(pmem->virt_addr))
                return;

        pfn_start = PHYS_PFN(phys);
        pfn_end = pfn_start + PHYS_PFN(len);
        for (pfn = pfn_start; pfn < pfn_end; pfn++) {
                struct page *page = pfn_to_page(pfn);

                /*
                 * Note, no need to hold a get_dev_pagemap() reference
                 * here since we're in the driver I/O path and
                 * outstanding I/O requests pin the dev_pagemap.
                 */
                if (test_and_clear_pmem_poison(page))
                        clear_mce_nospec(pfn);
        }
}
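
/*
 * Background note: on x86, pages that took a machine check are removed from
 * the kernel linear map so speculative accesses cannot touch them again.
 * Once the media range has been cleared, both halves must be undone: the
 * per-page pmem poison flag (test_and_clear_pmem_poison()) and the
 * not-present mapping (clear_mce_nospec()).
 */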
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
                phys_addr_t offset, unsigned int len)
{
        struct device *dev = to_dev(pmem);
        sector_t sector;
        long cleared;
        blk_status_t rc = BLK_STS_OK;

        sector = (offset - pmem->data_offset) / 512;

        cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
        if (cleared < len)
                rc = BLK_STS_IOERR;
        if (cleared > 0 && cleared / 512) {
                hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
                cleared /= 512;
                dev_dbg(dev, "%#llx clear %ld sector%s\n",
                                (unsigned long long) sector, cleared,
                                cleared > 1 ? "s" : "");
                badblocks_clear(&pmem->bb, sector, cleared);
                if (pmem->bb_state)
                        sysfs_notify_dirent(pmem->bb_state);
        }

        arch_invalidate_pmem(pmem->virt_addr + offset, len);

        return rc;
}
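
/*
 * Worked example of the arithmetic above (illustrative numbers, not from
 * the source): with data_offset = 2M and a poisoned 4K page at namespace
 * offset 2M + 8K, 'sector' is 8192 / 512 = 16, and a fully successful
 * clear of len = 4096 gives cleared / 512 = 8 sectors to drop from the
 * badblocks list. A partial clear (cleared < len) still shrinks the
 * badblocks range but fails the I/O with BLK_STS_IOERR.
 */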
static void write_pmem(void *pmem_addr, struct page *page,
                unsigned int off, unsigned int len)
{
        unsigned int chunk;
        void *mem;

        while (len) {
                mem = kmap_atomic(page);
                chunk = min_t(unsigned int, len, PAGE_SIZE);
                memcpy_flushcache(pmem_addr, mem + off, chunk);
                kunmap_atomic(mem);
                len -= chunk;
                off = 0;
                page++;
                pmem_addr += PAGE_SIZE;
        }
}
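
/*
 * write_pmem() relies on memcpy_flushcache() so stores are pushed out of
 * the CPU cache as they are made; together with nvdimm_flush() on
 * REQ_PREFLUSH / REQ_FUA (see pmem_make_request() below) this is what
 * gives writes their persistence guarantee.
 */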
static blk_status_t read_pmem(struct page *page, unsigned int off,
                void *pmem_addr, unsigned int len)
{
        unsigned int chunk;
        unsigned long rem;
        void *mem;

        while (len) {
                mem = kmap_atomic(page);
                chunk = min_t(unsigned int, len, PAGE_SIZE);
                rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
                kunmap_atomic(mem);
                if (rem)
                        return BLK_STS_IOERR;
                len -= chunk;
                off = 0;
                page++;
                pmem_addr += PAGE_SIZE;
        }
        return BLK_STS_OK;
}
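
/*
 * memcpy_mcsafe() returns the number of bytes it failed to copy, i.e. 0 on
 * success, and tolerates machine checks raised by consuming poisoned
 * media. That is why reads from bad pmem surface as BLK_STS_IOERR instead
 * of taking the system down.
 */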
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                        unsigned int len, unsigned int off, unsigned int op,
                        sector_t sector)
{
        blk_status_t rc = BLK_STS_OK;
        bool bad_pmem = false;
        phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
        void *pmem_addr = pmem->virt_addr + pmem_off;

        if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
                bad_pmem = true;

        if (!op_is_write(op)) {
                if (unlikely(bad_pmem))
                        rc = BLK_STS_IOERR;
                else {
                        rc = read_pmem(page, off, pmem_addr, len);
                        flush_dcache_page(page);
                }
        } else {
                /*
                 * Note that we write the data both before and after
                 * clearing poison.  The write before clear poison
                 * handles situations where the latest written data is
                 * preserved and the clear poison operation simply marks
                 * the address range as valid without changing the data.
                 * In this case application software can assume that an
                 * interrupted write will either return the new good
                 * data or an error.
                 *
                 * However, if pmem_clear_poison() leaves the data in an
                 * indeterminate state we need to perform the write
                 * after clear poison.
                 */
                flush_dcache_page(page);
                write_pmem(pmem_addr, page, off, len);
                if (unlikely(bad_pmem)) {
                        rc = pmem_clear_poison(pmem, pmem_off, len);
                        write_pmem(pmem_addr, page, off, len);
                }
        }

        return rc;
}
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
        blk_status_t rc = 0;
        bool do_acct;
        unsigned long start;
        struct bio_vec bvec;
        struct bvec_iter iter;
        struct pmem_device *pmem = q->queuedata;
        struct nd_region *nd_region = to_region(pmem);

        if (bio->bi_opf & REQ_PREFLUSH)
                nvdimm_flush(nd_region);

        do_acct = nd_iostat_start(bio, &start);
        bio_for_each_segment(bvec, bio, iter) {
                rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
                                bvec.bv_offset, bio_op(bio), iter.bi_sector);
                if (rc) {
                        bio->bi_status = rc;
                        break;
                }
        }
        if (do_acct)
                nd_iostat_end(bio, start);

        if (bio->bi_opf & REQ_FUA)
                nvdimm_flush(nd_region);

        bio_endio(bio);
        return BLK_QC_T_NONE;
}
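
/*
 * Flush semantics: pmem has no volatile write cache in the block layer
 * sense, but the memory controller's write pending queues may still hold
 * data, so both REQ_PREFLUSH and REQ_FUA map to nvdimm_flush(), which
 * writes to the region's flush hint addresses (when the platform provides
 * them) to drain those queues.
 */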
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
                struct page *page, unsigned int op)
{
        struct pmem_device *pmem = bdev->bd_queue->queuedata;
        blk_status_t rc;

        rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
                        0, op, sector);

        /*
         * The ->rw_page interface is subtle and tricky.  The core
         * retries on any error, so we can only invoke page_endio() in
         * the successful completion case.  Otherwise, we'll see crashes
         * caused by double completion.
         */
        if (rc == 0)
                page_endio(page, op_is_write(op), 0);

        return blk_status_to_errno(rc);
}
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn)
{
        resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

        if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
                                        PFN_PHYS(nr_pages))))
                return -EIO;

        if (kaddr)
                *kaddr = pmem->virt_addr + offset;
        if (pfn)
                *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

        /*
         * If badblocks are present, limit known good range to the
         * requested range.
         */
        if (unlikely(pmem->bb.count))
                return nr_pages;
        return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
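
/*
 * For illustration only (this call sequence lives in the DAX core and the
 * filesystems, not in this file): a consumer resolves pmem pages roughly
 * like so:
 *
 *	int id = dax_read_lock();
 *	long avail = dax_direct_access(dax_dev, pgoff, nr_pages,
 *			&kaddr, &pfn);
 *	if (avail > 0)
 *		... access pmem through kaddr, or map pfn to userspace ...
 *	dax_read_unlock(id);
 *
 * dax_direct_access() lands in pmem_dax_direct_access() below, which in
 * turn calls __pmem_direct_access().
 */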
static const struct block_device_operations pmem_fops = {
        .owner = THIS_MODULE,
        .rw_page = pmem_rw_page,
        .revalidate_disk = nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
                pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct pmem_device *pmem = dax_get_private(dax_dev);

        return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return copy_to_iter_mcsafe(addr, bytes, i);
}
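
/*
 * Note the asymmetry in the two iter helpers above: writes go through
 * copy_from_iter_flushcache() to make them durable, while reads go through
 * copy_to_iter_mcsafe() to survive machine checks from poisoned media,
 * mirroring the write_pmem()/read_pmem() pair in the bio path.
 */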
static const struct dax_operations pmem_dax_ops = {
        .direct_access = pmem_dax_direct_access,
        .copy_from_iter = pmem_copy_from_iter,
        .copy_to_iter = pmem_copy_to_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
        &dax_attribute_group,
        NULL,
};

static void pmem_release_queue(void *q)
{
        blk_cleanup_queue(q);
}

static void pmem_freeze_queue(void *q)
{
        blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
        struct pmem_device *pmem = __pmem;

        kill_dax(pmem->dax_dev);
        put_dax(pmem->dax_dev);
        del_gendisk(pmem->disk);
        put_disk(pmem->disk);
}

static void pmem_release_pgmap_ops(void *__pgmap)
{
        dev_pagemap_put_ops();
}

static void fsdax_pagefree(struct page *page, void *data)
{
        wake_up_var(&page->_refcount);
}

static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
{
        dev_pagemap_get_ops();
        if (devm_add_action_or_reset(dev, pmem_release_pgmap_ops, pgmap))
                return -ENOMEM;
        pgmap->type = MEMORY_DEVICE_FS_DAX;
        pgmap->page_free = fsdax_pagefree;

        return 0;
}
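
/*
 * The page_free callback above wakes anyone sleeping in wait_var_event()
 * on the page's refcount; the filesystem DAX code uses this to wait for
 * all references to device pages to drop before letting operations such
 * as truncate complete.
 */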
static int pmem_attach_disk(struct device *dev,
                struct nd_namespace_common *ndns)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
        int nid = dev_to_node(dev), fua;
        struct resource *res = &nsio->res;
        struct resource bb_res;
        struct nd_pfn *nd_pfn = NULL;
        struct dax_device *dax_dev;
        struct nd_pfn_sb *pfn_sb;
        struct pmem_device *pmem;
        struct request_queue *q;
        struct device *gendev;
        struct gendisk *disk;
        void *addr;
        int rc;

        pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
        if (!pmem)
                return -ENOMEM;

        /* while nsio_rw_bytes is active, parse a pfn info block if present */
        if (is_nd_pfn(dev)) {
                nd_pfn = to_nd_pfn(dev);
                rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
                if (rc)
                        return rc;
        }

        /* we're attaching a block device, disable raw namespace access */
        devm_nsio_disable(dev, nsio);

        dev_set_drvdata(dev, pmem);
        pmem->phys_addr = res->start;
        pmem->size = resource_size(res);
        fua = nvdimm_has_flush(nd_region);
        if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
                dev_warn(dev, "unable to guarantee persistence of writes\n");
                fua = 0;
        }

        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(&ndns->dev))) {
                dev_warn(dev, "could not reserve region %pR\n", res);
                return -EBUSY;
        }

        q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
        if (!q)
                return -ENOMEM;

        if (devm_add_action_or_reset(dev, pmem_release_queue, q))
                return -ENOMEM;

        pmem->pfn_flags = PFN_DEV;
        pmem->pgmap.ref = &q->q_usage_counter;
        if (is_nd_pfn(dev)) {
                if (setup_pagemap_fsdax(dev, &pmem->pgmap))
                        return -ENOMEM;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pfn_sb = nd_pfn->pfn_sb;
                pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
                pmem->pfn_pad = resource_size(res) -
                        resource_size(&pmem->pgmap.res);
                pmem->pfn_flags |= PFN_MAP;
                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
                bb_res.start += pmem->data_offset;
        } else if (pmem_should_map_pages(dev)) {
                memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
                pmem->pgmap.altmap_valid = false;
                if (setup_pagemap_fsdax(dev, &pmem->pgmap))
                        return -ENOMEM;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pmem->pfn_flags |= PFN_MAP;
                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
        } else
                addr = devm_memremap(dev, pmem->phys_addr,
                                pmem->size, ARCH_MEMREMAP_PMEM);

        /*
         * At release time the queue must be frozen before
         * devm_memremap_pages is unwound
         */
        if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
                return -ENOMEM;

        if (IS_ERR(addr))
                return PTR_ERR(addr);
        pmem->virt_addr = addr;

        blk_queue_write_cache(q, true, fua);
        blk_queue_make_request(q, pmem_make_request);
        blk_queue_physical_block_size(q, PAGE_SIZE);
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        if (pmem->pfn_flags & PFN_MAP)
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;

        disk = alloc_disk_node(0, nid);
        if (!disk)
                return -ENOMEM;
        pmem->disk = disk;

        disk->fops = &pmem_fops;
        disk->queue = q;
        disk->flags = GENHD_FL_EXT_DEVT;
        disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
        nvdimm_namespace_disk_name(ndns, disk->disk_name);
        set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
                        / 512);
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;
        nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
        disk->bb = &pmem->bb;

        dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
        if (!dax_dev) {
                put_disk(disk);
                return -ENOMEM;
        }
        dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
        pmem->dax_dev = dax_dev;

        gendev = disk_to_dev(disk);
        gendev->groups = pmem_attribute_groups;

        device_add_disk(dev, disk);
        if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
                return -ENOMEM;

        revalidate_disk(disk);

        pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
                        "badblocks");
        if (!pmem->bb_state)
                dev_warn(dev, "'badblocks' notification disabled\n");

        return 0;
}
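
/*
 * Summary of the three mapping cases in pmem_attach_disk() above:
 *
 * 1. pfn device (is_nd_pfn()): a pfn info block reserves capacity for the
 *    struct page array (or points it at RAM), so devm_memremap_pages() is
 *    used and data_offset/pfn_pad skip the metadata.
 * 2. pmem_should_map_pages(): no info block, but the namespace can still
 *    be page-mapped, so struct pages come from devm_memremap_pages() with
 *    the page map allocated from regular memory (altmap_valid = false).
 * 3. otherwise: a plain devm_memremap(); no struct pages, hence no DAX
 *    (PFN_MAP never gets set and QUEUE_FLAG_DAX stays clear).
 */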
static int nd_pmem_probe(struct device *dev)
{
        struct nd_namespace_common *ndns;

        ndns = nvdimm_namespace_common_probe(dev);
        if (IS_ERR(ndns))
                return PTR_ERR(ndns);

        if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
                return -ENXIO;

        if (is_nd_btt(dev))
                return nvdimm_namespace_attach_btt(ndns);

        if (is_nd_pfn(dev))
                return pmem_attach_disk(dev, ndns);

        /* if we find a valid info-block we'll come back as that personality */
        if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
                        || nd_dax_probe(dev, ndns) == 0)
                return -ENXIO;

        /* ...otherwise we're just a raw pmem device */
        return pmem_attach_disk(dev, ndns);
}
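
/*
 * The "personality" dance above: nd_btt_probe(), nd_pfn_probe() and
 * nd_dax_probe() each scan the raw namespace for their info block and, on
 * a match, register a new claiming device. Returning -ENXIO then fails
 * this raw probe, and the matching personality attaches via the
 * is_nd_btt()/is_nd_pfn() paths when the new device is probed.
 */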
static int nd_pmem_remove(struct device *dev)
{
        struct pmem_device *pmem = dev_get_drvdata(dev);

        if (is_nd_btt(dev))
                nvdimm_namespace_detach_btt(to_nd_btt(dev));
        else {
                /*
                 * Note, this assumes device_lock() context to not race
                 * nd_pmem_notify()
                 */
                sysfs_put(pmem->bb_state);
                pmem->bb_state = NULL;
        }
        nvdimm_flush(to_nd_region(dev->parent));

        return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
        nvdimm_flush(to_nd_region(dev->parent));
}
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
        struct nd_region *nd_region;
        resource_size_t offset = 0, end_trunc = 0;
        struct nd_namespace_common *ndns;
        struct nd_namespace_io *nsio;
        struct resource res;
        struct badblocks *bb;
        struct kernfs_node *bb_state;

        if (event != NVDIMM_REVALIDATE_POISON)
                return;

        if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                ndns = nd_btt->ndns;
                nd_region = to_nd_region(ndns->dev.parent);
                nsio = to_nd_namespace_io(&ndns->dev);
                bb = &nsio->bb;
                bb_state = NULL;
        } else {
                struct pmem_device *pmem = dev_get_drvdata(dev);

                nd_region = to_region(pmem);
                bb = &pmem->bb;
                bb_state = pmem->bb_state;

                if (is_nd_pfn(dev)) {
                        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
                        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

                        ndns = nd_pfn->ndns;
                        offset = pmem->data_offset +
                                        __le32_to_cpu(pfn_sb->start_pad);
                        end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
                } else {
                        ndns = to_ndns(dev);
                }

                nsio = to_nd_namespace_io(&ndns->dev);
        }

        res.start = nsio->res.start + offset;
        res.end = nsio->res.end - end_trunc;
        nvdimm_badblocks_populate(nd_region, bb, &res);
        if (bb_state)
                sysfs_notify_dirent(bb_state);
}
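
/*
 * NVDIMM_REVALIDATE_POISON is raised by the bus when the known-poison list
 * may have changed, e.g. after an address range scrub (ARS) completes, so
 * the badblocks list is repopulated from the region and any userspace
 * watcher of the 'badblocks' sysfs attribute is poked.
 */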
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);

static struct nd_device_driver nd_pmem_driver = {
        .probe = nd_pmem_probe,
        .remove = nd_pmem_remove,
        .notify = nd_pmem_notify,
        .shutdown = nd_pmem_shutdown,
        .drv = {
                .name = "nd_pmem",
        },
        .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");