x86, delay: tsc based udelay should have rdtsc_barrier
[linux-2.6/mini2440.git] / arch/x86/lib/msr.c
blob 1440b9c0547e9bd668a7bedf683686c57f5d1db0
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/msr.h>
struct msr_info {
	u32 msr_no;		/* MSR index to read or write */
	struct msr reg;		/* single in/out value */
	struct msr *msrs;	/* per-CPU result array, or NULL */
	int off;		/* first CPU in the mask; array base offset */
	int err;		/* error reported by the *_safe() variants */
};
/*
 * Runs on the target CPU via smp_call_function: store the MSR value
 * either in the single result slot or in this CPU's slot of the array.
 */
static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = &rv->msrs[this_cpu - rv->off];
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}
static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = &rv->msrs[this_cpu - rv->off];
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);
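/*
 * Example (hypothetical caller, not part of this file): read the
 * microcode revision MSR on CPU 2 and combine the two 32-bit halves.
 *
 *	u32 lo, hi;
 *	if (!rdmsr_on_cpu(2, MSR_IA32_UCODE_REV, &lo, &hi))
 *		pr_info("ucode rev: %#llx\n", ((u64)hi << 32) | lo);
 */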
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);
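/*
 * Example (hypothetical caller): write a 64-bit value on CPU 0, split
 * into the low/high 32-bit halves this interface expects. MSR_FOO is a
 * stand-in for a real MSR index, not a real constant.
 *
 *	u64 val = 0x100000001ULL;
 *	wrmsr_on_cpu(0, MSR_FOO, (u32)val, (u32)(val >> 32));
 */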
/* rdmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 */
void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.off    = cpumask_first(mask);
	rv.msrs   = msrs;
	rv.msr_no = msr_no;

	preempt_disable();
	/*
	 * FIXME: handle the CPU we're executing on separately for now until
	 * smp_call_function_many has been fixed to not skip it.
	 */
	this_cpu = raw_smp_processor_id();
	smp_call_function_single(this_cpu, __rdmsr_on_cpu, &rv, 1);

	smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
	preempt_enable();
}
EXPORT_SYMBOL(rdmsr_on_cpus);
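/*
 * Example (hypothetical caller): read one MSR on all online CPUs. The
 * result array is indexed by (cpu - cpumask_first(mask)), so size it to
 * cover every CPU that can appear in the mask.
 *
 *	struct msr *vals = kcalloc(num_possible_cpus(), sizeof(*vals),
 *				   GFP_KERNEL);
 *	if (vals) {
 *		rdmsr_on_cpus(cpu_online_mask, MSR_IA32_UCODE_REV, vals);
 *		... use vals[cpu - cpumask_first(cpu_online_mask)] ...
 *		kfree(vals);
 *	}
 */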
/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 */
void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.off    = cpumask_first(mask);
	rv.msrs   = msrs;
	rv.msr_no = msr_no;

	preempt_disable();
	/*
	 * FIXME: handle the CPU we're executing on separately for now until
	 * smp_call_function_many has been fixed to not skip it.
	 */
	this_cpu = raw_smp_processor_id();
	smp_call_function_single(this_cpu, __wrmsr_on_cpu, &rv, 1);

	smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
	preempt_enable();
}
EXPORT_SYMBOL(wrmsr_on_cpus);
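/*
 * Example (hypothetical caller): read, tweak, and write back an MSR
 * across all online CPUs using the array-based helpers together.
 * MSR_FOO is again a stand-in index; vals is sized as above.
 *
 *	rdmsr_on_cpus(cpu_online_mask, MSR_FOO, vals);
 *	for_each_online_cpu(cpu)
 *		vals[cpu - cpumask_first(cpu_online_mask)].l |= 0x1;
 *	wrmsr_on_cpus(cpu_online_mask, MSR_FOO, vals);
 */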
/* These "safe" variants are slower and should be used when the target MSR
   may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
}
static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
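/*
 * Example (hypothetical caller): probe an MSR that may not exist. The
 * return value is the cross-call error if the IPI itself failed,
 * otherwise the fault status from rdmsr_safe() on the target CPU.
 * MSR_MAYBE is a stand-in for a real MSR index.
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe_on_cpu(0, MSR_MAYBE, &lo, &hi))
 *		pr_debug("MSR not readable on CPU 0\n");
 */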