/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			/* reuse the mapping of an earlier hint in the same page */
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}
int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates. This should prevent the
	 * extra flushes to the same flush hint address.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}
static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};
bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and the bit number for a nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
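
/*
 * Example (illustrative only): a memory region with at least one aliased
 * DIMM reports ND_DEVICE_NAMESPACE_PMEM above, and its child namespace
 * devices then get a modalias derived from that integer type (roughly
 * "nd:t<nstype>"), which is what lets the matching namespace driver bind.
 * The exact modalias format is owned by the bus code, not this file.
 */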
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_memory(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible (see region_visible() below).
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	nvdimm_flush(nd_region);

	return len;
}
static DEVICE_ATTR_RW(deep_flush);
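
/*
 * Usage sketch (assumes a region device named "region0"; the actual name
 * depends on enumeration order):
 *
 *	# cat /sys/bus/nd/devices/region0/deep_flush
 *	# echo 1 > /sys/bus/nd/devices/region0/deep_flush
 *
 * Writes of anything other than a "true" value are rejected with -EINVAL;
 * a successful write triggers nvdimm_flush() for the region.
 */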
static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
							nsindex));
		}
	}
	nvdimm_bus_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);
static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);
static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);
static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	NULL,
};
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr) {
		if (is_nd_pmem(dev))
			return 0400;
		else
			return 0;
	}

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}
struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && is_nd_region(dev)) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && is_nd_region(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;

	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}
/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
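
/*
 * Typical caller pattern (illustrative sketch only; the real users are the
 * BTT and BLK I/O paths, and "do_io" is a stand-in name):
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	do_io(nd_region, lane, ...);	// use the per-lane resources
 *	nd_region_release_lane(nd_region, lane);
 *
 * Acquire/release must be paired on the same lane value, and the section
 * in between runs with preemption disabled (the get_cpu() in acquire is
 * only dropped at release), so it must not sleep.
 */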
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
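
/*
 * Provider-side sketch (illustrative only; only the nd_region_desc fields
 * consumed by nd_region_create() above are shown, the rest is the bus
 * provider's concern):
 *
 *	struct nd_region_desc ndr_desc = { 0 };
 *
 *	ndr_desc.res = &span_of_pmem;		// SPA range for the region
 *	ndr_desc.num_mappings = nr_dimms;	// one entry per DIMM interleave
 *	ndr_desc.mapping = mappings;		// start/size/position per DIMM
 *	ndr_desc.attr_groups = provider_attr_groups;
 *	region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
 *
 * This is roughly what a bus provider (e.g. the ACPI NFIT driver) does after
 * parsing its platform tables; num_lanes is overridden here as shown above.
 */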
/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache(). The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
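
/*
 * Caller sketch (illustrative; mirrors how a pmem-style consumer might use
 * the two helpers together):
 *
 *	int has_flush = nvdimm_has_flush(nd_region);
 *
 *	if (has_flush < 0)
 *		dev_warn(dev, "unable to guarantee persistence of writes\n");
 *	else if (has_flush == 1)
 *		nvdimm_flush(nd_region);	// hints present, trigger WPQ flush
 *	// has_flush == 0: rely on platform persistence (e.g. ADR), nothing to do
 */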
int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

struct conflict_context {
	struct nd_region *nd_region;
	resource_size_t start, size;
};
static int region_conflict(struct device *dev, void *data)
{
	struct nd_region *nd_region;
	struct conflict_context *ctx = data;
	resource_size_t res_end, region_end, region_start;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region == ctx->nd_region)
		return 0;

	res_end = ctx->start + ctx->size;
	region_start = nd_region->ndr_start;
	region_end = region_start + nd_region->ndr_size;
	if (ctx->start >= region_start && ctx->start < region_end)
		return -EBUSY;
	if (res_end > region_start && res_end <= region_end)
		return -EBUSY;
	return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}