/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11 #ifndef __MM_INTERNAL_H
12 #define __MM_INTERNAL_H
/* Tear down the page tables covering [floor, ceiling) for the VMA chain
 * starting at start_vma (implementation elsewhere — presumably mm/memory.c;
 * TODO confirm). */
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
/* Initialise @page as the head of a compound page of the given order. */
extern void prep_compound_page(struct page *page, unsigned long order);
/* Like prep_compound_page(), but for gigantic pages whose tail pages may
 * span mem_map discontiguities. */
extern void prep_compound_gigantic_page(struct page *page, unsigned long order);
22 static inline void set_page_count(struct page
*page
, int v
)
24 atomic_set(&page
->_count
, v
);
28 * Turn a non-refcounted page (->_count == 0) into refcounted with
31 static inline void set_page_refcounted(struct page
*page
)
33 VM_BUG_ON(PageTail(page
));
34 VM_BUG_ON(atomic_read(&page
->_count
));
35 set_page_count(page
, 1);
38 static inline void __put_page(struct page
*page
)
40 atomic_dec(&page
->_count
);
/* Hand a (2^order)-page block back to the buddy allocator during early
 * boot, bypassing the normal free path. */
extern void __free_pages_bootmem(struct page *page, unsigned int order);
/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	/* The order is stashed in page_private() only while the page sits
	 * free in the buddy allocator. */
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}
57 * Return the mem_map entry representing the 'offset' subpage within
58 * the maximally aligned gigantic page 'base'. Handle any discontiguity
59 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
61 static inline struct page
*mem_map_offset(struct page
*base
, int offset
)
63 if (unlikely(offset
>= MAX_ORDER_NR_PAGES
))
64 return pfn_to_page(page_to_pfn(base
) + offset
);
69 * Iterator over all subpages withing the maximally aligned gigantic
70 * page 'base'. Handle any discontiguity in the mem_map.
72 static inline struct page
*mem_map_next(struct page
*iter
,
73 struct page
*base
, int offset
)
75 if (unlikely((offset
& (MAX_ORDER_NR_PAGES
- 1)) == 0)) {
76 unsigned long pfn
= page_to_pfn(base
) + offset
;
79 return pfn_to_page(pfn
);
/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
96 /* Memory initialisation debug and verification */
103 #ifdef CONFIG_DEBUG_MEMORY_INIT
105 extern int mminit_loglevel
;
107 #define mminit_dprintk(level, prefix, fmt, arg...) \
109 if (level < mminit_loglevel) { \
110 printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
111 printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
115 extern void mminit_verify_pageflags_layout(void);
116 extern void mminit_verify_page_links(struct page
*page
,
117 enum zone_type zone
, unsigned long nid
, unsigned long pfn
);
118 extern void mminit_verify_zonelist(void);
122 static inline void mminit_dprintk(enum mminit_level level
,
123 const char *prefix
, const char *fmt
, ...)
127 static inline void mminit_verify_pageflags_layout(void)
131 static inline void mminit_verify_page_links(struct page
*page
,
132 enum zone_type zone
, unsigned long nid
, unsigned long pfn
)
136 static inline void mminit_verify_zonelist(void)
139 #endif /* CONFIG_DEBUG_MEMORY_INIT */
/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
/* Clamp [*start_pfn, *end_pfn) to what the SPARSEMEM model can represent. */
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */