4 * Copyright (C) 1999 VA Linux Systems
5 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
6 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
7 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
33 #include <acpi/pdc_intel.h>
35 #include <linux/init.h>
36 #include <linux/numa.h>
37 #include <asm/system.h>
40 #define COMPILER_DEPENDENT_INT64 long
41 #define COMPILER_DEPENDENT_UINT64 unsigned long
44 * Calling conventions:
46 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
47 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
48 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
49 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
51 #define ACPI_SYSTEM_XFACE
52 #define ACPI_EXTERNAL_XFACE
53 #define ACPI_INTERNAL_XFACE
54 #define ACPI_INTERNAL_VAR_XFACE
58 #define ACPI_ASM_MACROS
60 #define ACPI_DISABLE_IRQS() local_irq_disable()
61 #define ACPI_ENABLE_IRQS() local_irq_enable()
62 #define ACPI_FLUSH_CPU_CACHE()
/*
 * Acquire the ACPI "global lock" (FACS global-lock field) per the ACPI
 * spec protocol: bit 0 is the "pending" flag, bit 1 is the "owned" flag.
 * Build the new lock word from the old one and install it with an
 * acquire-semantics compare-and-swap, retrying until the CAS succeeds.
 *
 * Returns 0 if the lock was acquired outright; -1 if it was already
 * owned and we only set the pending bit (the owner signals us on
 * release, and the caller must then wait for that event).
 */
static inline int
ia64_acpi_acquire_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		/*
		 * Try to set "owned" (bit 1); if the lock is already owned,
		 * set "pending" (bit 0) instead: ((old >> 1) & 1) folds the
		 * current owned bit into the pending position.
		 */
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = ia64_cmpxchg4_acq(lock, new, old);
	} while (unlikely (val != old));
	/* new < 3 means we only managed to set "pending", not "owned". */
	return (new < 3) ? -1 : 0;
}
/*
 * Release the ACPI global lock: clear both the "owned" and "pending"
 * bits with an acquire-semantics compare-and-swap, retrying until the
 * CAS succeeds.
 *
 * Returns the old "pending" bit: non-zero means another agent was
 * waiting for the lock, and the caller must signal it (GBL_RLS) per
 * the ACPI global-lock protocol.
 */
static inline int
ia64_acpi_release_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;	/* clear owned + pending */
		val = ia64_cmpxchg4_acq(lock, new, old);
	} while (unlikely (val != old));
	return old & 0x1;
}
88 #define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
89 ((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
91 #define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
92 ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
94 #define acpi_disabled 0 /* ACPI always enabled on IA64 */
95 #define acpi_noirq 0 /* ACPI always enabled on IA64 */
96 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
97 #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
98 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
99 static inline void disable_acpi(void) { }
101 const char *acpi_get_sysname (void);
102 int acpi_request_vector (u32 int_type
);
103 int acpi_gsi_to_irq (u32 gsi
, unsigned int *irq
);
105 /* routines for saving/restoring kernel state */
106 extern int acpi_save_state_mem(void);
107 extern void acpi_restore_state_mem(void);
108 extern unsigned long acpi_wakeup_address
;
111 * Record the cpei override flag and current logical cpu. This is
112 * useful for CPU removal.
114 extern unsigned int can_cpei_retarget(void);
115 extern unsigned int is_cpu_cpei_target(unsigned int cpu
);
116 extern void set_cpei_target_cpu(unsigned int cpu
);
117 extern unsigned int get_cpei_target_cpu(void);
118 extern void prefill_possible_map(void);
119 #ifdef CONFIG_ACPI_HOTPLUG_CPU
120 extern int additional_cpus
;
122 #define additional_cpus 0
125 #ifdef CONFIG_ACPI_NUMA
126 #if MAX_NUMNODES > 256
127 #define MAX_PXM_DOMAINS MAX_NUMNODES
129 #define MAX_PXM_DOMAINS (256)
131 extern int __devinitdata pxm_to_nid_map
[MAX_PXM_DOMAINS
];
132 extern int __initdata nid_to_pxm_map
[MAX_NUMNODES
];
135 #define acpi_unlazy_tlb(x)
137 #ifdef CONFIG_ACPI_NUMA
138 extern cpumask_t early_cpu_possible_map
;
139 #define for_each_possible_early_cpu(cpu) \
140 for_each_cpu_mask((cpu), early_cpu_possible_map)
142 static inline void per_cpu_scan_finalize(int min_cpus
, int reserve_cpus
)
144 int low_cpu
, high_cpu
;
148 low_cpu
= cpus_weight(early_cpu_possible_map
);
150 high_cpu
= max(low_cpu
, min_cpus
);
151 high_cpu
= min(high_cpu
+ reserve_cpus
, NR_CPUS
);
153 for (cpu
= low_cpu
; cpu
< high_cpu
; cpu
++) {
154 cpu_set(cpu
, early_cpu_possible_map
);
155 if (node_cpuid
[cpu
].nid
== NUMA_NO_NODE
) {
156 node_cpuid
[cpu
].nid
= next_nid
;
158 if (next_nid
>= num_online_nodes())
163 #endif /* CONFIG_ACPI_NUMA */
165 #endif /*__KERNEL__*/
167 #endif /*_ASM_ACPI_H*/