drivers/nvdimm/region_devs.c
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static DEFINE_IDA(region_ida);
static void nd_region_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);
        u16 i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                put_device(&nvdimm->dev);
        }
        free_percpu(nd_region->lane);
        ida_simple_remove(&region_ida, nd_region->id);
        if (is_nd_blk(dev))
                kfree(to_nd_blk_region(dev));
        else
                kfree(nd_region);
}
static struct device_type nd_blk_device_type = {
        .name = "nd_blk",
        .release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
        .name = "nd_pmem",
        .release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
        .name = "nd_volatile",
        .release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
        return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
        return dev ? dev->type == &nd_blk_device_type : false;
}
struct nd_region *to_nd_region(struct device *dev)
{
        struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

        WARN_ON(dev->type->release != nd_region_release);
        return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev);

        WARN_ON(!is_nd_blk(dev));
        return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
        return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
        return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
        ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and the bit number an nvdimm_bus uses
 * to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
        if (is_nd_pmem(&nd_region->dev)) {
                u16 i, alias;

                for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        if (nvdimm->flags & NDD_ALIASING)
                                alias++;
                }
                if (alias)
                        return ND_DEVICE_NAMESPACE_PMEM;
                else
                        return ND_DEVICE_NAMESPACE_IO;
        } else if (is_nd_blk(&nd_region->dev)) {
                return ND_DEVICE_NAMESPACE_BLK;
        }

        return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
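/*
 * Illustrative note (not from the original source): the nstype value
 * feeds the "nd:t%d" modalias format (ND_DEVICE_MODALIAS_FMT in
 * include/uapi/linux/ndctl.h).  Assuming the uapi constants of this
 * era (ND_DEVICE_NAMESPACE_IO == 4, _PMEM == 5, _BLK == 6), an
 * unlabelled PMEM region yields namespaces matching "nd:t4", an
 * aliased/labelled PMEM region "nd:t5", and a BLK region "nd:t6";
 * namespace drivers declare the corresponding type bit in order to
 * bind.
 */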
static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long size = 0;

        if (is_nd_pmem(dev)) {
                size = nd_region->ndr_size;
        } else if (nd_region->ndr_mappings == 1) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[0];

                size = nd_mapping->size;
        }

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (is_nd_pmem(dev) && nd_set)
                /* pass, should be precluded by region_visible */;
        else
                return -ENXIO;

        return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
        resource_size_t blk_max_overlap = 0, available, overlap;
        int i;

        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
        available = 0;
        overlap = blk_max_overlap;
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

                /* if a dimm is disabled the available capacity is zero */
                if (!ndd)
                        return 0;

                if (is_nd_pmem(&nd_region->dev)) {
                        available += nd_pmem_available_dpa(nd_region,
                                        nd_mapping, &overlap);
                        if (overlap > blk_max_overlap) {
                                blk_max_overlap = overlap;
                                goto retry;
                        }
                } else if (is_nd_blk(&nd_region->dev)) {
                        available += nd_blk_available_dpa(nd_mapping);
                }
        }

        return available;
}
static ssize_t available_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        unsigned long long available = 0;

        /*
         * Flush in-flight updates and grab a snapshot of the available
         * size.  Of course, this value is potentially invalidated the
         * moment the nvdimm_bus_lock() is dropped, but that's
         * userspace's problem to not race itself.
         */
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        available = nd_region_available_dpa(nd_region);
        nvdimm_bus_unlock(dev);

        return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);
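/*
 * Illustrative usage (not from the original source): these attributes
 * surface under the region's sysfs directory, so assuming a first
 * region named "region0" a shell can sample the snapshot directly,
 * subject to the raciness described above.  The byte count shown is a
 * made-up example value:
 *
 *      $ cat /sys/bus/nd/devices/region0/available_size
 *      17179869184
 */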
static ssize_t init_namespaces_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region_namespaces *num_ns = dev_get_drvdata(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (num_ns)
                rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count);
        else
                rc = -ENXIO;
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->ns_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->btt_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        if (nd_region->pfn_seed)
                rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
        else
                rc = sprintf(buf, "\n");
        nvdimm_bus_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t read_only_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev);

        return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool ro;
        int rc = strtobool(buf, &ro);
        struct nd_region *nd_region = to_nd_region(dev);

        if (rc)
                return rc;

        nd_region->ro = ro;
        return len;
}
static DEVICE_ATTR_RW(read_only);
static struct attribute *nd_region_attributes[] = {
        &dev_attr_size.attr,
        &dev_attr_nstype.attr,
        &dev_attr_mappings.attr,
        &dev_attr_btt_seed.attr,
        &dev_attr_pfn_seed.attr,
        &dev_attr_read_only.attr,
        &dev_attr_set_cookie.attr,
        &dev_attr_available_size.attr,
        &dev_attr_namespace_seed.attr,
        &dev_attr_init_namespaces.attr,
        NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        int type = nd_region_to_nstype(nd_region);

        if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
                return 0;

        if (a != &dev_attr_set_cookie.attr
                        && a != &dev_attr_available_size.attr)
                return a->mode;

        if ((type == ND_DEVICE_NAMESPACE_PMEM
                                || type == ND_DEVICE_NAMESPACE_BLK)
                        && a == &dev_attr_available_size.attr)
                return a->mode;
        else if (is_nd_pmem(dev) && nd_set)
                return a->mode;

        return 0;
}

struct attribute_group nd_region_attribute_group = {
        .attrs = nd_region_attributes,
        .is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
        struct nd_interleave_set *nd_set = nd_region->nd_set;

        if (nd_set)
                return nd_set->cookie;
        return 0;
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
                struct device *dev, bool probe)
{
        struct nd_region *nd_region;

        if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
                int i;

                nd_region = to_nd_region(dev);
                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                        struct nvdimm_drvdata *ndd = nd_mapping->ndd;
                        struct nvdimm *nvdimm = nd_mapping->nvdimm;

                        kfree(nd_mapping->labels);
                        nd_mapping->labels = NULL;
                        put_ndd(ndd);
                        nd_mapping->ndd = NULL;
                        if (ndd)
                                atomic_dec(&nvdimm->busy);
                }

                if (is_nd_pmem(dev))
                        return;

                to_nd_blk_region(dev)->disable(nvdimm_bus, dev);
        }
        if (dev->parent && is_nd_blk(dev->parent) && probe) {
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->ns_seed == dev)
                        nd_region_create_blk_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_btt(dev) && probe) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->btt_seed == dev)
                        nd_region_create_btt_seed(nd_region);
                if (nd_region->ns_seed == &nd_btt->ndns->dev &&
                                is_nd_blk(dev->parent))
                        nd_region_create_blk_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_pfn(dev) && probe) {
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->pfn_seed == dev)
                        nd_region_create_pfn_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
        nd_region_notify_driver_action(nvdimm_bus, dev, false);
}
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
        struct nd_region *nd_region = to_nd_region(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm *nvdimm;

        if (n >= nd_region->ndr_mappings)
                return -ENXIO;
        nd_mapping = &nd_region->mapping[n];
        nvdimm = nd_mapping->nvdimm;

        return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
                        nd_mapping->start, nd_mapping->size);
}
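/*
 * Illustrative note (not from the original source): each mapping%d
 * attribute reads back as "<dimm>,<dpa-start>,<size>" per the sprintf
 * above.  Assuming the bus names DIMM devices "nmem%d", a 16 GiB
 * mapping at DPA 0 on the first DIMM would show as:
 *
 *      nmem0,0,17179869184
 */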
#define REGION_MAPPING(idx)                                             \
static ssize_t mapping##idx##_show(struct device *dev,                  \
                struct device_attribute *attr, char *buf)               \
{                                                                       \
        return mappingN(dev, buf, idx);                                 \
}                                                                       \
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nd_region *nd_region = to_nd_region(dev);

        if (n < nd_region->ndr_mappings)
                return a->mode;
        return 0;
}

static struct attribute *mapping_attributes[] = {
        &dev_attr_mapping0.attr,
        &dev_attr_mapping1.attr,
        &dev_attr_mapping2.attr,
        &dev_attr_mapping3.attr,
        &dev_attr_mapping4.attr,
        &dev_attr_mapping5.attr,
        &dev_attr_mapping6.attr,
        &dev_attr_mapping7.attr,
        &dev_attr_mapping8.attr,
        &dev_attr_mapping9.attr,
        &dev_attr_mapping10.attr,
        &dev_attr_mapping11.attr,
        &dev_attr_mapping12.attr,
        &dev_attr_mapping13.attr,
        &dev_attr_mapping14.attr,
        &dev_attr_mapping15.attr,
        &dev_attr_mapping16.attr,
        &dev_attr_mapping17.attr,
        &dev_attr_mapping18.attr,
        &dev_attr_mapping19.attr,
        &dev_attr_mapping20.attr,
        &dev_attr_mapping21.attr,
        &dev_attr_mapping22.attr,
        &dev_attr_mapping23.attr,
        &dev_attr_mapping24.attr,
        &dev_attr_mapping25.attr,
        &dev_attr_mapping26.attr,
        &dev_attr_mapping27.attr,
        &dev_attr_mapping28.attr,
        &dev_attr_mapping29.attr,
        &dev_attr_mapping30.attr,
        &dev_attr_mapping31.attr,
        NULL,
};

struct attribute_group nd_mapping_attribute_group = {
        .is_visible = mapping_visible,
        .attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
int nd_blk_region_init(struct nd_region *nd_region)
{
        struct device *dev = &nd_region->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!is_nd_blk(dev))
                return 0;

        if (nd_region->ndr_mappings < 1) {
                dev_err(dev, "invalid BLK region\n");
                return -ENXIO;
        }

        return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}
/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
        unsigned int cpu, lane;

        cpu = get_cpu();
        if (nd_region->num_lanes < nr_cpu_ids) {
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                lane = cpu % nd_region->num_lanes;
                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (ndl_count->count++ == 0)
                        spin_lock(&ndl_lock->lock);
        } else
                lane = cpu;

        return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
        if (nd_region->num_lanes < nr_cpu_ids) {
                unsigned int cpu = get_cpu();
                struct nd_percpu_lane *ndl_lock, *ndl_count;

                ndl_count = per_cpu_ptr(nd_region->lane, cpu);
                ndl_lock = per_cpu_ptr(nd_region->lane, lane);
                if (--ndl_count->count == 0)
                        spin_unlock(&ndl_lock->lock);
                put_cpu();
        }
        put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
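/*
 * Illustrative usage (not from the original source): a BLK-I/O or BTT
 * path brackets its data-window/log access with the pair above.
 * Preemption stays disabled across the critical section via the
 * get_cpu() in acquire; the trailing put_cpu() in release balances it:
 *
 *      unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *      ... issue I/O through the data window or log slot for 'lane' ...
 *
 *      nd_region_release_lane(nd_region, lane);
 */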
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc, struct device_type *dev_type,
                const char *caller)
{
        struct nd_region *nd_region;
        struct device *dev;
        void *region_buf;
        unsigned int i;
        int ro = 0;

        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
                        dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
                                        caller, dev_name(&nvdimm->dev), i);

                        return NULL;
                }

                if (nvdimm->flags & NDD_UNARMED)
                        ro = 1;
        }

        if (dev_type == &nd_blk_device_type) {
                struct nd_blk_region_desc *ndbr_desc;
                struct nd_blk_region *ndbr;

                ndbr_desc = to_blk_region_desc(ndr_desc);
                ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
                                * ndr_desc->num_mappings,
                                GFP_KERNEL);
                if (ndbr) {
                        nd_region = &ndbr->nd_region;
                        ndbr->enable = ndbr_desc->enable;
                        ndbr->disable = ndbr_desc->disable;
                        ndbr->do_io = ndbr_desc->do_io;
                }
                region_buf = ndbr;
        } else {
                nd_region = kzalloc(sizeof(struct nd_region)
                                + sizeof(struct nd_mapping)
                                * ndr_desc->num_mappings,
                                GFP_KERNEL);
                region_buf = nd_region;
        }

        if (!region_buf)
                return NULL;
        nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
        if (nd_region->id < 0)
                goto err_id;

        nd_region->lane = alloc_percpu(struct nd_percpu_lane);
        if (!nd_region->lane)
                goto err_percpu;

        for (i = 0; i < nr_cpu_ids; i++) {
                struct nd_percpu_lane *ndl;

                ndl = per_cpu_ptr(nd_region->lane, i);
                spin_lock_init(&ndl->lock);
                ndl->count = 0;
        }

        memcpy(nd_region->mapping, ndr_desc->nd_mapping,
                        sizeof(struct nd_mapping) * ndr_desc->num_mappings);
        for (i = 0; i < ndr_desc->num_mappings; i++) {
                struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;

                get_device(&nvdimm->dev);
        }
        nd_region->ndr_mappings = ndr_desc->num_mappings;
        nd_region->provider_data = ndr_desc->provider_data;
        nd_region->nd_set = ndr_desc->nd_set;
        nd_region->num_lanes = ndr_desc->num_lanes;
        nd_region->flags = ndr_desc->flags;
        nd_region->ro = ro;
        nd_region->numa_node = ndr_desc->numa_node;
        ida_init(&nd_region->ns_ida);
        ida_init(&nd_region->btt_ida);
        ida_init(&nd_region->pfn_ida);
        dev = &nd_region->dev;
        dev_set_name(dev, "region%d", nd_region->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = dev_type;
        dev->groups = ndr_desc->attr_groups;
        nd_region->ndr_size = resource_size(ndr_desc->res);
        nd_region->ndr_start = ndr_desc->res->start;
        nd_device_register(dev);

        return nd_region;

 err_percpu:
        ida_simple_remove(&region_ida, nd_region->id);
 err_id:
        kfree(region_buf);
        return NULL;
}
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
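/*
 * Illustrative sketch (not from the original source): a bus provider,
 * e.g. an NFIT-style platform driver, fills a descriptor and calls one
 * of these creation helpers.  The names spa_start, spa_len, my_groups,
 * and my_mapping are hypothetical, and the nd_region_desc field names
 * are assumed to match this era of the subsystem:
 *
 *      struct resource res = {
 *              .start = spa_start,
 *              .end = spa_start + spa_len - 1,
 *      };
 *      struct nd_region_desc ndr_desc = {
 *              .res = &res,
 *              .attr_groups = my_groups,
 *              .numa_node = NUMA_NO_NODE,
 *              .num_mappings = 1,
 *              .nd_mapping = &my_mapping,      (nvdimm, start DPA, size)
 *      };
 *
 *      if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
 *              return -ENXIO;
 */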
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        if (ndr_desc->num_mappings > 1)
                return NULL;
        ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
                struct nd_region_desc *ndr_desc)
{
        ndr_desc->num_lanes = ND_MAX_LANES;
        return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
                        __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);