include/asm-x86/topology.h  (linux-2.6/mini2440.git)
/*
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _ASM_X86_TOPOLOGY_H
#define _ASM_X86_TOPOLOGY_H

#ifdef CONFIG_NUMA
#include <linux/cpumask.h>
#include <asm/mpspec.h>
/* Mappings between logical cpu number and node number */
#ifdef CONFIG_X86_32
extern int cpu_to_node_map[];
#else
/* Returns the number of the current Node. */
#define numa_node_id()	(early_cpu_to_node(raw_smp_processor_id()))
#endif

DECLARE_PER_CPU(int, x86_cpu_to_node_map);

#ifdef CONFIG_SMP
extern int x86_cpu_to_node_map_init[];
extern void *x86_cpu_to_node_map_early_ptr;
#else
#define x86_cpu_to_node_map_early_ptr NULL
#endif
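
/*
 * x86_cpu_to_node_map_init[] is the boot-time mapping table; while it is
 * the one in use, x86_cpu_to_node_map_early_ptr points at it (the pointer
 * is NULL on !SMP builds and, presumably, once the per-cpu copy has taken
 * over).
 */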

extern cpumask_t node_to_cpumask_map[];

#define NUMA_NO_NODE	(-1)

/* Returns the number of the node containing CPU 'cpu' */
#ifdef CONFIG_X86_32
#define early_cpu_to_node(cpu)	cpu_to_node(cpu)
static inline int cpu_to_node(int cpu)
{
	return cpu_to_node_map[cpu];
}

#else /* CONFIG_X86_64 */

#ifdef CONFIG_SMP
static inline int early_cpu_to_node(int cpu)
{
	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;

	if (cpu_to_node_map)
		return cpu_to_node_map[cpu];
	else if (per_cpu_offset(cpu))
		return per_cpu(x86_cpu_to_node_map, cpu);
	else
		return NUMA_NO_NODE;
}
#else
#define early_cpu_to_node(cpu)	cpu_to_node(cpu)
#endif
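
/*
 * On 64-bit SMP, early_cpu_to_node() must work before the per-cpu areas
 * exist: it prefers the early boot mapping, falls back to the per-cpu map
 * once per_cpu_offset(cpu) is non-zero, and otherwise returns NUMA_NO_NODE
 * so callers can tell that no mapping is available yet.
 */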

static inline int cpu_to_node(int cpu)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (x86_cpu_to_node_map_early_ptr) {
		printk(KERN_NOTICE "cpu_to_node(%d): usage too early!\n",
			cpu);
		dump_stack();
		return ((int *)x86_cpu_to_node_map_early_ptr)[cpu];
	}
#endif
	return per_cpu(x86_cpu_to_node_map, cpu);
}
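
/*
 * Illustrative use once the system is up (assumes preemption is disabled
 * around smp_processor_id()): pick the current CPU's node for a
 * node-local allocation.
 *
 *	int nid = cpu_to_node(smp_processor_id());
 *	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
 */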

#ifdef CONFIG_NUMA

/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
#define node_to_cpumask_ptr(v, node)		\
		cpumask_t *v = &(node_to_cpumask_map[node])

#define node_to_cpumask_ptr_next(v, node)	\
			v = &(node_to_cpumask_map[node])
#endif
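
/*
 * These declare (or retarget) a local pointer into node_to_cpumask_map[]
 * so callers can walk a node's CPUs without copying a cpumask_t onto the
 * stack; a sketch of typical use (do_per_cpu_work() is hypothetical):
 *
 *	node_to_cpumask_ptr(node_cpus, nid);
 *	for_each_cpu_mask(cpu, *node_cpus)
 *		do_per_cpu_work(cpu);
 */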

#endif /* CONFIG_X86_64 */

/*
 * Returns the number of the node containing Node 'node'.  This
 * architecture is flat, so it is a pretty simple function!
 */
#define parent_node(node) (node)

/* Returns a bitmask of CPUs on Node 'node'. */
static inline cpumask_t node_to_cpumask(int node)
{
	return node_to_cpumask_map[node];
}

/* Returns the number of the first CPU on Node 'node'. */
static inline int node_to_first_cpu(int node)
{
	cpumask_t mask = node_to_cpumask(node);

	return first_cpu(mask);
}
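
/*
 * Note that node_to_cpumask() returns the mask by value, copying NR_CPUS
 * bits through the stack; the node_to_cpumask_ptr() form above exists to
 * avoid that copy on large configurations.
 */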

#define pcibus_to_node(bus) __pcibus_to_node(bus)
#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)
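
/*
 * pcibus_to_node()/pcibus_to_cpumask() let NUMA-aware drivers place data
 * and work close to a device; the __pcibus_to_* helpers they wrap are
 * provided by the x86 PCI support code.
 */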

#ifdef CONFIG_X86_32
extern unsigned long node_start_pfn[];
extern unsigned long node_end_pfn[];
extern unsigned long node_remap_size[];
#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])

# ifdef CONFIG_X86_HT
#  define ENABLE_TOPO_DEFINES
# endif

# define SD_CACHE_NICE_TRIES	1
# define SD_IDLE_IDX		1
# define SD_NEWIDLE_IDX		2
# define SD_FORKEXEC_IDX	0

#else

# ifdef CONFIG_SMP
#  define ENABLE_TOPO_DEFINES
# endif

# define SD_CACHE_NICE_TRIES	2
# define SD_IDLE_IDX		2
# define SD_NEWIDLE_IDX		2
# define SD_FORKEXEC_IDX	1

#endif

/* sched_domains SD_NODE_INIT for NUMAQ machines */
#define SD_NODE_INIT (struct sched_domain) {		\
	.min_interval		= 8,			\
	.max_interval		= 32,			\
	.busy_factor		= 32,			\
	.imbalance_pct		= 125,			\
	.cache_nice_tries	= SD_CACHE_NICE_TRIES,	\
	.busy_idx		= 3,			\
	.idle_idx		= SD_IDLE_IDX,		\
	.newidle_idx		= SD_NEWIDLE_IDX,	\
	.wake_idx		= 1,			\
	.forkexec_idx		= SD_FORKEXEC_IDX,	\
	.flags			= SD_LOAD_BALANCE	\
				| SD_BALANCE_EXEC	\
				| SD_BALANCE_FORK	\
				| SD_SERIALIZE		\
				| SD_WAKE_BALANCE,	\
	.last_balance		= jiffies,		\
	.balance_interval	= 1,			\
}
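
/*
 * SD_NODE_INIT is the initializer for the NUMA-level sched_domain.  The
 * *_idx fields choose which load-average history the scheduler consults
 * for each kind of balancing decision, and the width-specific SD_* defines
 * above let 32-bit and 64-bit kernels tune those decisions differently.
 */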

#ifdef CONFIG_X86_64_ACPI_NUMA
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
#endif

#else /* CONFIG_NUMA */

#endif

#include <asm-generic/topology.h>

extern cpumask_t cpu_coregroup_map(int cpu);

#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
#define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
#endif
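
/*
 * With ENABLE_TOPO_DEFINES in effect, generic code such as the sysfs
 * topology files can query the package/core layout, roughly:
 *
 *	int pkg = topology_physical_package_id(cpu);
 *	cpumask_t threads = topology_thread_siblings(cpu);
 */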

static inline void arch_fix_phys_package_id(int num, u32 slot)
{
}

struct pci_bus;
void set_pci_bus_resources_arch_default(struct pci_bus *b);

#ifdef CONFIG_SMP
#define mc_capable()	(boot_cpu_data.x86_max_cores > 1)
#define smt_capable()	(smp_num_siblings > 1)
#endif
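
/*
 * mc_capable()/smt_capable() tell the scheduler whether multi-core and
 * hyper-threading sibling domains are worth building on this machine.
 */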

#ifdef CONFIG_NUMA
extern int get_mp_bus_to_node(int busnum);
extern void set_mp_bus_to_node(int busnum, int node);
#else
static inline int get_mp_bus_to_node(int busnum)
{
	return 0;
}
static inline void set_mp_bus_to_node(int busnum, int node)
{
}
#endif
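
/*
 * On NUMA kernels the PCI probing code records each root bus's home node
 * with set_mp_bus_to_node() and reads it back via get_mp_bus_to_node();
 * the !CONFIG_NUMA stubs simply report node 0.
 */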

#endif /* _ASM_X86_TOPOLOGY_H */