// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/lockdep.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory.h>
#include <linux/memory-tiers.h>
#include <linux/notifier.h>

#include "internal.h"
struct memory_tier {
	/* hierarchy of memory tiers */
	struct list_head list;
	/* list of all memory types part of this tier */
	struct list_head memory_types;
	/*
	 * start value of abstract distance. memory tier maps
	 * an abstract distance range,
	 * adistance_start .. adistance_start + MEMTIER_CHUNK_SIZE
	 */
	int adistance_start;
	struct device dev;
	/* All the nodes that are part of all the lower memory tiers. */
	nodemask_t lower_tier_mask;
};
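
/*
 * Illustrative note (not in the original source): with the constants from
 * <linux/memory-tiers.h> (MEMTIER_CHUNK_BITS == 10, MEMTIER_CHUNK_SIZE ==
 * 1024, MEMTIER_ADISTANCE_DRAM == 4 * MEMTIER_CHUNK_SIZE +
 * MEMTIER_CHUNK_SIZE / 2 == 4608), the default DRAM type lands in the tier
 * covering abstract distances 4096..5119, whose sysfs device id is
 * 4096 >> MEMTIER_CHUNK_BITS == 4.  That leaves room for four faster tiers
 * (ids 0..3), matching the comment in memory_tier_init() below.
 */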

struct demotion_nodes {
	nodemask_t preferred;
};

struct node_memory_type_map {
	struct memory_dev_type *memtype;
	int map_count;
};

static DEFINE_MUTEX(memory_tier_lock);
static LIST_HEAD(memory_tiers);
/*
 * The list is used to store all memory types that are not created
 * by a device driver.
 */
static LIST_HEAD(default_memory_types);
static struct node_memory_type_map node_memory_types[MAX_NUMNODES];
struct memory_dev_type *default_dram_type;
nodemask_t default_dram_nodes __initdata = NODE_MASK_NONE;

static const struct bus_type memory_tier_subsys = {
	.name = "memory_tiering",
	.dev_name = "memory_tier",
};

#ifdef CONFIG_MIGRATION
static int top_tier_adistance;
/*
 * node_demotion[] examples:
 *
 * Example 1:
 *
 * Node 0 & 1 are CPU + DRAM nodes, node 2 & 3 are PMEM nodes.
 *
 * memory_tiers0 = 0-1
 * memory_tiers1 = 2-3
 *
 * node_demotion[0].preferred = 2
 * node_demotion[1].preferred = 3
 * node_demotion[2].preferred = <empty>
 * node_demotion[3].preferred = <empty>
 *
 * Example 2:
 *
 * Node 0 & 1 are CPU + DRAM nodes, node 2 is a memory-only DRAM node.
 *
 * memory_tiers0 = 0-2
 *
 * node_demotion[0].preferred = <empty>
 * node_demotion[1].preferred = <empty>
 * node_demotion[2].preferred = <empty>
 *
 * Example 3:
 *
 * Node 0 is a CPU + DRAM node, node 1 is an HBM node, node 2 is a PMEM node.
 *
 * memory_tiers0 = 1
 * memory_tiers1 = 0
 * memory_tiers2 = 2
 *
 * node_demotion[0].preferred = 2
 * node_demotion[1].preferred = 0
 * node_demotion[2].preferred = <empty>
 */
static struct demotion_nodes *node_demotion __read_mostly;
#endif /* CONFIG_MIGRATION */

static BLOCKING_NOTIFIER_HEAD(mt_adistance_algorithms);

/* The lock is used to protect `default_dram_perf*` info and nid. */
static DEFINE_MUTEX(default_dram_perf_lock);
static bool default_dram_perf_error;
static struct access_coordinate default_dram_perf;
static int default_dram_perf_ref_nid = NUMA_NO_NODE;
static const char *default_dram_perf_ref_source;

static inline struct memory_tier *to_memory_tier(struct device *device)
{
	return container_of(device, struct memory_tier, dev);
}

static __always_inline nodemask_t get_memtier_nodemask(struct memory_tier *memtier)
{
	nodemask_t nodes = NODE_MASK_NONE;
	struct memory_dev_type *memtype;

	list_for_each_entry(memtype, &memtier->memory_types, tier_sibling)
		nodes_or(nodes, nodes, memtype->nodes);

	return nodes;
}

static void memory_tier_device_release(struct device *dev)
{
	struct memory_tier *tier = to_memory_tier(dev);

	/*
	 * synchronize_rcu in clear_node_memory_tier makes sure
	 * we don't have rcu access to this memory tier.
	 */
	kfree(tier);
}

static ssize_t nodelist_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int ret;
	nodemask_t nmask;

	mutex_lock(&memory_tier_lock);
	nmask = get_memtier_nodemask(to_memory_tier(dev));
	ret = sysfs_emit(buf, "%*pbl\n", nodemask_pr_args(&nmask));
	mutex_unlock(&memory_tier_lock);
	return ret;
}
static DEVICE_ATTR_RO(nodelist);
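
/*
 * Illustrative note (not in the original source): because the subsystem is
 * registered via subsys_virtual_register() in memory_tier_init(), each
 * tier's nodelist shows up under
 * /sys/devices/virtual/memory_tiering/memory_tier<N>/nodelist.  For
 * example, on a machine whose DRAM nodes 0-1 share the default tier:
 *
 *	$ cat /sys/devices/virtual/memory_tiering/memory_tier4/nodelist
 *	0-1
 */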

static struct attribute *memtier_dev_attrs[] = {
	&dev_attr_nodelist.attr,
	NULL
};

static const struct attribute_group memtier_dev_group = {
	.attrs = memtier_dev_attrs,
};

static const struct attribute_group *memtier_dev_groups[] = {
	&memtier_dev_group,
	NULL
};

static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memtype)
{
	int ret;
	bool found_slot = false;
	struct memory_tier *memtier, *new_memtier;
	int adistance = memtype->adistance;
	unsigned int memtier_adistance_chunk_size = MEMTIER_CHUNK_SIZE;

	lockdep_assert_held_once(&memory_tier_lock);

	adistance = round_down(adistance, memtier_adistance_chunk_size);
	/*
	 * If the memtype is already part of a memory tier,
	 * just return that memory tier.
	 */
	if (!list_empty(&memtype->tier_sibling)) {
		list_for_each_entry(memtier, &memory_tiers, list) {
			if (adistance == memtier->adistance_start)
				return memtier;
		}
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(memtier, &memory_tiers, list) {
		if (adistance == memtier->adistance_start) {
			goto link_memtype;
		} else if (adistance < memtier->adistance_start) {
			found_slot = true;
			break;
		}
	}

	new_memtier = kzalloc(sizeof(struct memory_tier), GFP_KERNEL);
	if (!new_memtier)
		return ERR_PTR(-ENOMEM);

	new_memtier->adistance_start = adistance;
	INIT_LIST_HEAD(&new_memtier->list);
	INIT_LIST_HEAD(&new_memtier->memory_types);
	if (found_slot)
		list_add_tail(&new_memtier->list, &memtier->list);
	else
		list_add_tail(&new_memtier->list, &memory_tiers);

	new_memtier->dev.id = adistance >> MEMTIER_CHUNK_BITS;
	new_memtier->dev.bus = &memory_tier_subsys;
	new_memtier->dev.release = memory_tier_device_release;
	new_memtier->dev.groups = memtier_dev_groups;

	ret = device_register(&new_memtier->dev);
	if (ret) {
		list_del(&new_memtier->list);
		put_device(&new_memtier->dev);
		return ERR_PTR(ret);
	}
	memtier = new_memtier;

link_memtype:
	list_add(&memtype->tier_sibling, &memtier->memory_types);
	return memtier;
}

static struct memory_tier *__node_get_memory_tier(int node)
{
	pg_data_t *pgdat;

	pgdat = NODE_DATA(node);
	if (!pgdat)
		return NULL;

	/*
	 * Since we hold memory_tier_lock, we can avoid
	 * RCU read locks when accessing the details. No
	 * parallel updates are possible here.
	 */
	return rcu_dereference_check(pgdat->memtier,
				     lockdep_is_held(&memory_tier_lock));
}

#ifdef CONFIG_MIGRATION
bool node_is_toptier(int node)
{
	bool toptier;
	pg_data_t *pgdat;
	struct memory_tier *memtier;

	pgdat = NODE_DATA(node);
	if (!pgdat)
		return false;

	rcu_read_lock();
	memtier = rcu_dereference(pgdat->memtier);
	if (!memtier) {
		toptier = true;
		goto out;
	}

	if (memtier->adistance_start <= top_tier_adistance)
		toptier = true;
	else
		toptier = false;
out:
	rcu_read_unlock();
	return toptier;
}

void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
{
	struct memory_tier *memtier;

	/*
	 * pg_data_t.memtier updates include a synchronize_rcu(),
	 * which ensures that we either find NULL or a valid memtier
	 * in NODE_DATA. Protect the access via rcu_read_lock().
	 */
	rcu_read_lock();
	memtier = rcu_dereference(pgdat->memtier);
	if (memtier)
		*targets = memtier->lower_tier_mask;
	else
		*targets = NODE_MASK_NONE;
	rcu_read_unlock();
}

/**
 * next_demotion_node() - Get the next node in the demotion path
 * @node: The starting node to lookup the next node
 *
 * Return: node id for next memory node in the demotion path hierarchy
 * from @node; NUMA_NO_NODE if @node is terminal. This does not keep
 * @node online or guarantee that it *continues* to be the next demotion
 * target.
 */
int next_demotion_node(int node)
{
	struct demotion_nodes *nd;
	int target;

	if (!node_demotion)
		return NUMA_NO_NODE;

	nd = &node_demotion[node];

	/*
	 * node_demotion[] is updated without excluding this
	 * function from running.
	 *
	 * Make sure to use RCU over entire code blocks if
	 * node_demotion[] reads need to be consistent.
	 */
	rcu_read_lock();
	/*
	 * If there are multiple target nodes, just select one
	 * target node randomly.
	 *
	 * We could also use round-robin to select the target node,
	 * but that requires another variable in node_demotion[] to
	 * record the last selected target node; the changing of the
	 * last target node may cause cache ping-pong. Per-CPU data
	 * could avoid that caching issue but seems more complicated,
	 * so selecting the target node randomly seems better for now.
	 */
	target = node_random(&nd->preferred);
	rcu_read_unlock();

	return target;
}
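
/*
 * Illustrative sketch (not part of this file): a reclaim-side caller
 * typically consults next_demotion_node() before demoting, along these
 * lines.  demote_folio() is a hypothetical helper standing in for the
 * real migration path:
 *
 *	int nid = next_demotion_node(folio_nid(folio));
 *
 *	if (nid == NUMA_NO_NODE)
 *		reclaim(folio);		// terminal node, no demotion target
 *	else
 *		demote_folio(folio, nid);
 */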

static void disable_all_demotion_targets(void)
{
	struct memory_tier *memtier;
	int node;

	for_each_node_state(node, N_MEMORY) {
		node_demotion[node].preferred = NODE_MASK_NONE;
		/*
		 * We are holding memory_tier_lock, it is safe
		 * to access pgdat->memtier.
		 */
		memtier = __node_get_memory_tier(node);
		if (memtier)
			memtier->lower_tier_mask = NODE_MASK_NONE;
	}
	/*
	 * Ensure that the "disable" is visible across the system.
	 * Readers will see either a combination of before+disable
	 * state or disable+after. They will never see before and
	 * after state together.
	 */
	synchronize_rcu();
}

static void dump_demotion_targets(void)
{
	int node;

	for_each_node_state(node, N_MEMORY) {
		struct memory_tier *memtier = __node_get_memory_tier(node);
		nodemask_t preferred = node_demotion[node].preferred;

		if (!memtier)
			continue;

		if (nodes_empty(preferred))
			pr_info("Demotion targets for Node %d: null\n", node);
		else
			pr_info("Demotion targets for Node %d: preferred: %*pbl, fallback: %*pbl\n",
				node, nodemask_pr_args(&preferred),
				nodemask_pr_args(&memtier->lower_tier_mask));
	}
}

/*
 * Find an automatic demotion target for all memory
 * nodes. Failing here is OK. It might just indicate
 * being at the end of a chain.
 */
static void establish_demotion_targets(void)
{
	struct memory_tier *memtier;
	struct demotion_nodes *nd;
	int target = NUMA_NO_NODE, node;
	int distance, best_distance;
	nodemask_t tier_nodes, lower_tier;

	lockdep_assert_held_once(&memory_tier_lock);

	if (!node_demotion)
		return;

	disable_all_demotion_targets();

	for_each_node_state(node, N_MEMORY) {
		best_distance = -1;
		nd = &node_demotion[node];

		memtier = __node_get_memory_tier(node);
		if (!memtier || list_is_last(&memtier->list, &memory_tiers))
			continue;
		/*
		 * Get the lower memtier to find the demotion node list.
		 */
		memtier = list_next_entry(memtier, list);
		tier_nodes = get_memtier_nodemask(memtier);
		/*
		 * find_next_best_node() uses the 'used' nodemask as a skip
		 * list. Add all memory nodes except the selected memory tier
		 * nodelist to the skip list so that we find the best node
		 * from the memtier nodelist.
		 */
		nodes_andnot(tier_nodes, node_states[N_MEMORY], tier_nodes);

		/*
		 * Find all the nodes in the memory tier node list with the
		 * same best distance and add them to the preferred mask.
		 * We randomly select between nodes in the preferred mask
		 * when allocating pages during demotion.
		 */
		do {
			target = find_next_best_node(node, &tier_nodes);
			if (target == NUMA_NO_NODE)
				break;

			distance = node_distance(node, target);
			if (distance == best_distance || best_distance == -1) {
				best_distance = distance;
				node_set(target, nd->preferred);
			} else {
				break;
			}
		} while (1);
	}
	/*
	 * Promotion is allowed from a memory tier to a higher
	 * memory tier only if the memory tier doesn't include
	 * compute. We want to skip promotion from a memory tier
	 * if any node that is part of the memory tier has CPUs.
	 * Once we detect such a memory tier, we consider that tier
	 * as the top tier from which promotion is not allowed.
	 */
	list_for_each_entry_reverse(memtier, &memory_tiers, list) {
		tier_nodes = get_memtier_nodemask(memtier);
		nodes_and(tier_nodes, node_states[N_CPU], tier_nodes);
		if (!nodes_empty(tier_nodes)) {
			/*
			 * abstract distance below the max value of this
			 * memtier is considered toptier.
			 */
			top_tier_adistance = memtier->adistance_start +
						MEMTIER_CHUNK_SIZE - 1;
			break;
		}
	}
	/*
	 * Now build the lower_tier mask for each node, collecting the node
	 * masks of all memory tiers below it. This allows us to fall back
	 * demotion page allocation to a set of nodes that is closer to the
	 * preferred node selected above.
	 */
	lower_tier = node_states[N_MEMORY];
	list_for_each_entry(memtier, &memory_tiers, list) {
		/*
		 * Keep removing the current tier from the lower_tier nodes.
		 * This removes all nodes in the current and above memory
		 * tiers from the lower_tier mask.
		 */
		tier_nodes = get_memtier_nodemask(memtier);
		nodes_andnot(lower_tier, lower_tier, tier_nodes);
		memtier->lower_tier_mask = lower_tier;
	}

	dump_demotion_targets();
}
#else
static inline void establish_demotion_targets(void) {}
#endif /* CONFIG_MIGRATION */

static inline void __init_node_memory_type(int node, struct memory_dev_type *memtype)
{
	if (!node_memory_types[node].memtype)
		node_memory_types[node].memtype = memtype;
	/*
	 * For each device getting added to the same NUMA node
	 * with this specific memtype, bump the map count. We
	 * only take one memtype device reference, so that
	 * changing a node's memtype can be done by dropping the
	 * only reference count taken here.
	 */
	if (node_memory_types[node].memtype == memtype) {
		if (!node_memory_types[node].map_count++)
			kref_get(&memtype->kref);
	}
}

static struct memory_tier *set_node_memory_tier(int node)
{
	struct memory_tier *memtier;
	struct memory_dev_type *memtype = default_dram_type;
	int adist = MEMTIER_ADISTANCE_DRAM;
	pg_data_t *pgdat = NODE_DATA(node);

	lockdep_assert_held_once(&memory_tier_lock);

	if (!node_state(node, N_MEMORY))
		return ERR_PTR(-EINVAL);

	mt_calc_adistance(node, &adist);
	if (!node_memory_types[node].memtype) {
		memtype = mt_find_alloc_memory_type(adist, &default_memory_types);
		if (IS_ERR(memtype)) {
			memtype = default_dram_type;
			pr_info("Failed to allocate a memory type. Fall back.\n");
		}
	}

	__init_node_memory_type(node, memtype);

	memtype = node_memory_types[node].memtype;
	node_set(node, memtype->nodes);
	memtier = find_create_memory_tier(memtype);
	if (!IS_ERR(memtier))
		rcu_assign_pointer(pgdat->memtier, memtier);
	return memtier;
}

static void destroy_memory_tier(struct memory_tier *memtier)
{
	list_del(&memtier->list);
	device_unregister(&memtier->dev);
}

static bool clear_node_memory_tier(int node)
{
	bool cleared = false;
	pg_data_t *pgdat;
	struct memory_tier *memtier;

	pgdat = NODE_DATA(node);
	if (!pgdat)
		return false;

	/*
	 * Make sure that anybody looking at NODE_DATA who finds
	 * a valid memtier finds memory_dev_types with nodes still
	 * linked to the memtier. We achieve this by waiting for
	 * the rcu read section to finish using synchronize_rcu.
	 * This also enables us to free the destroyed memory tier
	 * with kfree instead of kfree_rcu.
	 */
	memtier = __node_get_memory_tier(node);
	if (memtier) {
		struct memory_dev_type *memtype;

		rcu_assign_pointer(pgdat->memtier, NULL);
		synchronize_rcu();
		memtype = node_memory_types[node].memtype;
		node_clear(node, memtype->nodes);
		if (nodes_empty(memtype->nodes)) {
			list_del_init(&memtype->tier_sibling);
			if (list_empty(&memtier->memory_types))
				destroy_memory_tier(memtier);
		}
		cleared = true;
	}
	return cleared;
}

static void release_memtype(struct kref *kref)
{
	struct memory_dev_type *memtype;

	memtype = container_of(kref, struct memory_dev_type, kref);
	kfree(memtype);
}

struct memory_dev_type *alloc_memory_type(int adistance)
{
	struct memory_dev_type *memtype;

	memtype = kmalloc(sizeof(*memtype), GFP_KERNEL);
	if (!memtype)
		return ERR_PTR(-ENOMEM);

	memtype->adistance = adistance;
	INIT_LIST_HEAD(&memtype->tier_sibling);
	memtype->nodes = NODE_MASK_NONE;
	kref_init(&memtype->kref);
	return memtype;
}
EXPORT_SYMBOL_GPL(alloc_memory_type);

void put_memory_type(struct memory_dev_type *memtype)
{
	kref_put(&memtype->kref, release_memtype);
}
EXPORT_SYMBOL_GPL(put_memory_type);

void init_node_memory_type(int node, struct memory_dev_type *memtype)
{
	mutex_lock(&memory_tier_lock);
	__init_node_memory_type(node, memtype);
	mutex_unlock(&memory_tier_lock);
}
EXPORT_SYMBOL_GPL(init_node_memory_type);

void clear_node_memory_type(int node, struct memory_dev_type *memtype)
{
	mutex_lock(&memory_tier_lock);
	if (node_memory_types[node].memtype == memtype || !memtype)
		node_memory_types[node].map_count--;
	/*
	 * If we unmapped all the attached devices to this node,
	 * clear the node memory type.
	 */
	if (!node_memory_types[node].map_count) {
		memtype = node_memory_types[node].memtype;
		node_memory_types[node].memtype = NULL;
		put_memory_type(memtype);
	}
	mutex_unlock(&memory_tier_lock);
}
EXPORT_SYMBOL_GPL(clear_node_memory_type);

struct memory_dev_type *mt_find_alloc_memory_type(int adist, struct list_head *memory_types)
{
	struct memory_dev_type *mtype;

	list_for_each_entry(mtype, memory_types, list)
		if (mtype->adistance == adist)
			return mtype;

	mtype = alloc_memory_type(adist);
	if (IS_ERR(mtype))
		return mtype;

	list_add(&mtype->list, memory_types);

	return mtype;
}
EXPORT_SYMBOL_GPL(mt_find_alloc_memory_type);

void mt_put_memory_types(struct list_head *memory_types)
{
	struct memory_dev_type *mtype, *mtn;

	list_for_each_entry_safe(mtype, mtn, memory_types, list) {
		list_del(&mtype->list);
		put_memory_type(mtype);
	}
}
EXPORT_SYMBOL_GPL(mt_put_memory_types);
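
/*
 * Illustrative sketch (not part of this file): a memory device driver
 * keeps its own memory_types list and pairs the helpers above roughly as
 * follows.  "my_memory_types", "my_adist" and "nid" are hypothetical
 * driver state:
 *
 *	static LIST_HEAD(my_memory_types);
 *
 *	struct memory_dev_type *mtype =
 *		mt_find_alloc_memory_type(my_adist, &my_memory_types);
 *	if (!IS_ERR(mtype))
 *		init_node_memory_type(nid, mtype);	// before onlining memory
 *	...
 *	clear_node_memory_type(nid, mtype);	// on device removal
 *	mt_put_memory_types(&my_memory_types);	// drop the list references
 */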

/*
 * This is invoked via `late_initcall()` to initialize memory tiers for
 * memory nodes, both with and without CPUs. After the initialization of
 * firmware and devices, adistance algorithms are expected to be provided.
 */
static int __init memory_tier_late_init(void)
{
	int nid;
	struct memory_tier *memtier;

	guard(mutex)(&memory_tier_lock);

	/* Assign each uninitialized N_MEMORY node to a memory tier. */
	for_each_node_state(nid, N_MEMORY) {
		/*
		 * Some device drivers may have initialized
		 * memory tiers, potentially bringing memory nodes
		 * online and configuring memory tiers.
		 * Skip those nodes here.
		 */
		if (node_memory_types[nid].memtype)
			continue;

		memtier = set_node_memory_tier(nid);
		if (IS_ERR(memtier))
			continue;
	}

	establish_demotion_targets();

	return 0;
}
late_initcall(memory_tier_late_init);

static void dump_hmem_attrs(struct access_coordinate *coord, const char *prefix)
{
	pr_info(
"%sread_latency: %u, write_latency: %u, read_bandwidth: %u, write_bandwidth: %u\n",
		prefix, coord->read_latency, coord->write_latency,
		coord->read_bandwidth, coord->write_bandwidth);
}

int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
			     const char *source)
{
	guard(mutex)(&default_dram_perf_lock);
	if (default_dram_perf_error)
		return -EIO;

	if (perf->read_latency + perf->write_latency == 0 ||
	    perf->read_bandwidth + perf->write_bandwidth == 0)
		return -EINVAL;

	if (default_dram_perf_ref_nid == NUMA_NO_NODE) {
		default_dram_perf = *perf;
		default_dram_perf_ref_nid = nid;
		default_dram_perf_ref_source = kstrdup(source, GFP_KERNEL);
		return 0;
	}

	/*
	 * The performance of all default DRAM nodes is expected to be the
	 * same (that is, the variation is less than 10%).  It will be used
	 * as the base to calculate the abstract distance of other memory
	 * nodes.
	 */
	if (abs(perf->read_latency - default_dram_perf.read_latency) * 10 >
	    default_dram_perf.read_latency ||
	    abs(perf->write_latency - default_dram_perf.write_latency) * 10 >
	    default_dram_perf.write_latency ||
	    abs(perf->read_bandwidth - default_dram_perf.read_bandwidth) * 10 >
	    default_dram_perf.read_bandwidth ||
	    abs(perf->write_bandwidth - default_dram_perf.write_bandwidth) * 10 >
	    default_dram_perf.write_bandwidth) {
		pr_info(
"memory-tiers: the performance of DRAM node %d mismatches that of the reference\n"
"DRAM node %d.\n", nid, default_dram_perf_ref_nid);
		pr_info("  performance of reference DRAM node %d:\n",
			default_dram_perf_ref_nid);
		dump_hmem_attrs(&default_dram_perf, "    ");
		pr_info("  performance of DRAM node %d:\n", nid);
		dump_hmem_attrs(perf, "    ");
		pr_info(
"  disable default DRAM node performance based abstract distance algorithm.\n");
		default_dram_perf_error = true;
		return -EINVAL;
	}

	return 0;
}
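
/*
 * Worked example (illustrative, made-up numbers) for the 10% check above:
 * if the reference node reports read_latency == 100, a node reporting 115
 * mismatches because abs(115 - 100) * 10 == 150 > 100 (more than 10%
 * deviation), while 109 passes since abs(109 - 100) * 10 == 90 <= 100.
 */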

int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
{
	guard(mutex)(&default_dram_perf_lock);
	if (default_dram_perf_error)
		return -EIO;

	if (perf->read_latency + perf->write_latency == 0 ||
	    perf->read_bandwidth + perf->write_bandwidth == 0)
		return -EINVAL;

	if (default_dram_perf_ref_nid == NUMA_NO_NODE)
		return -ENOENT;

	/*
	 * The abstract distance of a memory node is in direct proportion to
	 * its memory latency (read + write) and inversely proportional to its
	 * memory bandwidth (read + write). The abstract distance, memory
	 * latency, and memory bandwidth of the default DRAM nodes are used
	 * as the base.
	 */
	*adist = MEMTIER_ADISTANCE_DRAM *
		(perf->read_latency + perf->write_latency) /
		(default_dram_perf.read_latency + default_dram_perf.write_latency) *
		(default_dram_perf.read_bandwidth + default_dram_perf.write_bandwidth) /
		(perf->read_bandwidth + perf->write_bandwidth);

	return 0;
}
EXPORT_SYMBOL_GPL(mt_perf_to_adistance);
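
/*
 * Worked example (illustrative, made-up numbers) for the formula above:
 * a node with twice the default DRAM latency and half its bandwidth gets
 * adist == MEMTIER_ADISTANCE_DRAM * 2 * 2, i.e. four times the DRAM
 * abstract distance, and is therefore placed in a much slower tier.
 */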

/**
 * register_mt_adistance_algorithm() - Register memory tiering abstract distance algorithm
 * @nb: The notifier block which describes the algorithm
 *
 * Return: 0 on success, errno on error.
 *
 * Every memory tiering abstract distance algorithm provider needs to
 * register the algorithm with register_mt_adistance_algorithm(). To
 * calculate the abstract distance for a specified memory node, the
 * notifier function will be called unless a higher-priority algorithm
 * has already provided a result. The prototype of the notifier function
 * is as follows,
 *
 *   int (*algorithm_notifier)(struct notifier_block *nb,
 *                             unsigned long nid, void *data);
 *
 * Where "nid" specifies the memory node, "data" is the pointer to the
 * returned abstract distance (that is, "int *adist"). If the
 * algorithm provides the result, NOTIFY_STOP should be returned.
 * Otherwise, return_value & %NOTIFY_STOP_MASK == 0 to allow the next
 * algorithm in the chain to provide the result.
 */
int register_mt_adistance_algorithm(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mt_adistance_algorithms, nb);
}
EXPORT_SYMBOL_GPL(register_mt_adistance_algorithm);
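
/*
 * Illustrative sketch (not part of this file): a minimal provider
 * following the prototype documented above.  "example_adistance" and
 * "example_calc_adist" are hypothetical names:
 *
 *	static int example_adistance(struct notifier_block *nb,
 *				     unsigned long nid, void *data)
 *	{
 *		int *adist = data;
 *
 *		if (example_calc_adist(nid, adist))
 *			return NOTIFY_STOP;	// result stored in *adist
 *		return NOTIFY_OK;		// let the next algorithm try
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_adistance,
 *	};
 *
 *	register_mt_adistance_algorithm(&example_nb);
 */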

/**
 * unregister_mt_adistance_algorithm() - Unregister memory tiering abstract distance algorithm
 * @nb: The notifier block which describes the algorithm
 *
 * Return: 0 on success, errno on error.
 */
int unregister_mt_adistance_algorithm(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mt_adistance_algorithms, nb);
}
EXPORT_SYMBOL_GPL(unregister_mt_adistance_algorithm);

/**
 * mt_calc_adistance() - Calculate abstract distance with registered algorithms
 * @node: the node to calculate abstract distance for
 * @adist: the returned abstract distance
 *
 * Return: if return_value & %NOTIFY_STOP_MASK != 0, then some abstract
 * distance algorithm provided the result, which is returned via @adist.
 * Otherwise, no algorithm could provide the result and @adist is kept
 * as-is.
 */
int mt_calc_adistance(int node, int *adist)
{
	return blocking_notifier_call_chain(&mt_adistance_algorithms, node, adist);
}
EXPORT_SYMBOL_GPL(mt_calc_adistance);

static int __meminit memtier_hotplug_callback(struct notifier_block *self,
					      unsigned long action, void *_arg)
{
	struct memory_tier *memtier;
	struct memory_notify *arg = _arg;

	/*
	 * Only update the node migration order when a node is
	 * changing status, like online->offline.
	 */
	if (arg->status_change_nid < 0)
		return notifier_from_errno(0);

	switch (action) {
	case MEM_OFFLINE:
		mutex_lock(&memory_tier_lock);
		if (clear_node_memory_tier(arg->status_change_nid))
			establish_demotion_targets();
		mutex_unlock(&memory_tier_lock);
		break;
	case MEM_ONLINE:
		mutex_lock(&memory_tier_lock);
		memtier = set_node_memory_tier(arg->status_change_nid);
		if (!IS_ERR(memtier))
			establish_demotion_targets();
		mutex_unlock(&memory_tier_lock);
		break;
	}

	return notifier_from_errno(0);
}

static int __init memory_tier_init(void)
{
	int ret;

	ret = subsys_virtual_register(&memory_tier_subsys, NULL);
	if (ret)
		panic("%s() failed to register memory tier subsystem\n", __func__);

#ifdef CONFIG_MIGRATION
	node_demotion = kcalloc(nr_node_ids, sizeof(struct demotion_nodes),
				GFP_KERNEL);
	WARN_ON(!node_demotion);
#endif

	guard(mutex)(&memory_tier_lock);
	/*
	 * For now we can have 4 faster memory tiers with smaller adistance
	 * than default DRAM tier.
	 */
	default_dram_type = mt_find_alloc_memory_type(MEMTIER_ADISTANCE_DRAM,
						      &default_memory_types);
	if (IS_ERR(default_dram_type))
		panic("%s() failed to allocate default DRAM tier\n", __func__);

	/* Record nodes with memory and CPU to set default DRAM performance. */
	nodes_and(default_dram_nodes, node_states[N_MEMORY],
		  node_states[N_CPU]);

	hotplug_memory_notifier(memtier_hotplug_callback, MEMTIER_HOTPLUG_PRI);
	return 0;
}
subsys_initcall(memory_tier_init);

bool numa_demotion_enabled = false;

#ifdef CONFIG_MIGRATION
#ifdef CONFIG_SYSFS
static ssize_t demotion_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  numa_demotion_enabled ? "true" : "false");
}

static ssize_t demotion_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &numa_demotion_enabled);
	if (ret)
		return ret;

	return count;
}

static struct kobj_attribute numa_demotion_enabled_attr =
	__ATTR_RW(demotion_enabled);

static struct attribute *numa_attrs[] = {
	&numa_demotion_enabled_attr.attr,
	NULL,
};

static const struct attribute_group numa_attr_group = {
	.attrs = numa_attrs,
};

static int __init numa_init_sysfs(void)
{
	int err;
	struct kobject *numa_kobj;

	numa_kobj = kobject_create_and_add("numa", mm_kobj);
	if (!numa_kobj) {
		pr_err("failed to create numa kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(numa_kobj, &numa_attr_group);
	if (err) {
		pr_err("failed to register numa group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(numa_kobj);
	return err;
}
subsys_initcall(numa_init_sysfs);
#endif /* CONFIG_SYSFS */
#endif /* CONFIG_MIGRATION */