/*
 * mm/percpu-internal.h — internal definitions shared by the percpu
 * allocator implementation files (mm/percpu.c and friends).
 */
1 #ifndef _MM_PERCPU_INTERNAL_H
2 #define _MM_PERCPU_INTERNAL_H
4 #include <linux/types.h>
5 #include <linux/percpu.h>
/*
 * pcpu_block_md is the metadata block struct.
 * Each chunk's bitmap is split into a number of full blocks.
 * All units are in terms of bits.
 */
struct pcpu_block_md {
	int			contig_hint;	/* contig hint for block */
	int			contig_hint_start; /* block relative starting
						      position of the contig hint */
	int			left_free;	/* size of free space along
						   the left side of the block */
	int			right_free;	/* size of free space along
						   the right side of the block */
	int			first_free;	/* block position of first free */
};
23 struct pcpu_chunk {
24 #ifdef CONFIG_PERCPU_STATS
25 int nr_alloc; /* # of allocations */
26 size_t max_alloc_size; /* largest allocation size */
27 #endif
29 struct list_head list; /* linked to pcpu_slot lists */
30 int free_bytes; /* free bytes in the chunk */
31 int contig_bits; /* max contiguous size hint */
32 int contig_bits_start; /* contig_bits starting
33 offset */
34 void *base_addr; /* base address of this chunk */
36 unsigned long *alloc_map; /* allocation map */
37 unsigned long *bound_map; /* boundary map */
38 struct pcpu_block_md *md_blocks; /* metadata blocks */
40 void *data; /* chunk data */
41 int first_bit; /* no free below this */
42 bool immutable; /* no [de]population allowed */
43 int start_offset; /* the overlap with the previous
44 region to have a page aligned
45 base_addr */
46 int end_offset; /* additional area required to
47 have the region end page
48 aligned */
50 int nr_pages; /* # of pages served by this chunk */
51 int nr_populated; /* # of populated pages */
52 int nr_empty_pop_pages; /* # of empty populated pages */
53 unsigned long populated[]; /* populated bitmap */
56 extern spinlock_t pcpu_lock;
58 extern struct list_head *pcpu_slot;
59 extern int pcpu_nr_slots;
60 extern int pcpu_nr_empty_pop_pages;
62 extern struct pcpu_chunk *pcpu_first_chunk;
63 extern struct pcpu_chunk *pcpu_reserved_chunk;
65 /**
66 * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
67 * @chunk: chunk of interest
69 * This conversion is from the number of physical pages that the chunk
70 * serves to the number of bitmap blocks used.
72 static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
74 return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
77 /**
78 * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
79 * @pages: number of physical pages
81 * This conversion is from physical pages to the number of bits
82 * required in the bitmap.
84 static inline int pcpu_nr_pages_to_map_bits(int pages)
86 return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
89 /**
90 * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
91 * @chunk: chunk of interest
93 * This conversion is from the number of physical pages that the chunk
94 * serves to the number of bits in the bitmap.
96 static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
98 return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
101 #ifdef CONFIG_PERCPU_STATS
103 #include <linux/spinlock.h>
105 struct percpu_stats {
106 u64 nr_alloc; /* lifetime # of allocations */
107 u64 nr_dealloc; /* lifetime # of deallocations */
108 u64 nr_cur_alloc; /* current # of allocations */
109 u64 nr_max_alloc; /* max # of live allocations */
110 u32 nr_chunks; /* current # of live chunks */
111 u32 nr_max_chunks; /* max # of live chunks */
112 size_t min_alloc_size; /* min allocaiton size */
113 size_t max_alloc_size; /* max allocation size */
116 extern struct percpu_stats pcpu_stats;
117 extern struct pcpu_alloc_info pcpu_stats_ai;
120 * For debug purposes. We don't care about the flexible array.
122 static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
124 memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));
126 /* initialize min_alloc_size to unit_size */
127 pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
131 * pcpu_stats_area_alloc - increment area allocation stats
132 * @chunk: the location of the area being allocated
133 * @size: size of area to allocate in bytes
135 * CONTEXT:
136 * pcpu_lock.
138 static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
140 lockdep_assert_held(&pcpu_lock);
142 pcpu_stats.nr_alloc++;
143 pcpu_stats.nr_cur_alloc++;
144 pcpu_stats.nr_max_alloc =
145 max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
146 pcpu_stats.min_alloc_size =
147 min(pcpu_stats.min_alloc_size, size);
148 pcpu_stats.max_alloc_size =
149 max(pcpu_stats.max_alloc_size, size);
151 chunk->nr_alloc++;
152 chunk->max_alloc_size = max(chunk->max_alloc_size, size);
156 * pcpu_stats_area_dealloc - decrement allocation stats
157 * @chunk: the location of the area being deallocated
159 * CONTEXT:
160 * pcpu_lock.
162 static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
164 lockdep_assert_held(&pcpu_lock);
166 pcpu_stats.nr_dealloc++;
167 pcpu_stats.nr_cur_alloc--;
169 chunk->nr_alloc--;
173 * pcpu_stats_chunk_alloc - increment chunk stats
175 static inline void pcpu_stats_chunk_alloc(void)
177 unsigned long flags;
178 spin_lock_irqsave(&pcpu_lock, flags);
180 pcpu_stats.nr_chunks++;
181 pcpu_stats.nr_max_chunks =
182 max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
184 spin_unlock_irqrestore(&pcpu_lock, flags);
188 * pcpu_stats_chunk_dealloc - decrement chunk stats
190 static inline void pcpu_stats_chunk_dealloc(void)
192 unsigned long flags;
193 spin_lock_irqsave(&pcpu_lock, flags);
195 pcpu_stats.nr_chunks--;
197 spin_unlock_irqrestore(&pcpu_lock, flags);
200 #else
202 static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
206 static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
210 static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
214 static inline void pcpu_stats_chunk_alloc(void)
218 static inline void pcpu_stats_chunk_dealloc(void)
222 #endif /* !CONFIG_PERCPU_STATS */
224 #endif