/* sys/platform/pc32/i386/cpufreq_machdep.c */
/*
 * Copyright (c) 2004 Martin Végiard.
 * Copyright (c) 2004-2005 Bruno Ducrot
 * Copyright (c) 2004 FUKUDA Nobuhiko <nfukuda@spa.is.uec.ac.jp>
 * Copyright (c) 2004, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Juan Romero Pardines and Martin Vegiard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
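
/*
 * FID/VID (frequency/voltage) transition support for AMD family 0Fh
 * (K8) processors, derived from the NetBSD PowerNow! code.
 */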

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/cpufunc.h>
#include <machine/cpufreq.h>

#define AMD0F_MSR_FIDVID_CTL		0xc0010041
#define AMD0F_MSR_FIDVID_STATUS		0xc0010042

/* AMD0F_MSR_FIDVID_STATUS */
#define AMD0F_STA_CFID(x)	((x) & 0x3f)
#define AMD0F_STA_SFID(x)	(((x) >> 8) & 0x3f)
#define AMD0F_STA_MFID(x)	(((x) >> 16) & 0x3f)
#define AMD0F_STA_PENDING(x)	(((x) >> 31) & 0x01)
#define AMD0F_STA_CVID(x)	(((x) >> 32) & 0x1f)
#define AMD0F_STA_SVID(x)	(((x) >> 40) & 0x1f)
#define AMD0F_STA_MVID(x)	(((x) >> 48) & 0x1f)
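
/*
 * Compose a FIDVID_CTL write: target FID in bits 0-5, target VID in
 * bits 8-12, bit 16 initiates the transition, and 'ctrl' (presumably
 * the stop-grant timeout count) occupies the upper dword.
 */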
#define AMD0F_WRITE_FIDVID(fid, vid, ctrl)	\
	wrmsr(AMD0F_MSR_FIDVID_CTL,		\
	    (((ctrl) << 32) | (1ULL << 16) | ((vid) << 8) | (fid)))
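
/* Spin until the hardware clears the change-pending bit. */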
#define AMD0F_WAIT_FIDVID_CHG(status)			\
do {							\
	(status) = rdmsr(AMD0F_MSR_FIDVID_STATUS);	\
} while (AMD0F_STA_PENDING(status))
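
/*
 * Fold FIDs below 8 onto a common VCO frequency scale so that the
 * step-distance check in phase 2 can compare any two FIDs directly.
 */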
#define AMD0F_FID2VCO(fid)			\
	(((fid) < 8) ? (8 + ((fid) << 1)) : (fid))
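
/*
 * Stabilization delays: VST (voltage stabilization time) counts in
 * 20us units, IRT (isochronous relief time) in power-of-two multiples
 * of 10us; DELAY() takes microseconds.
 */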
#define AMD0F_DELAY_VST(vst)	DELAY(20 * (vst))
#define AMD0F_DELAY_IRT(irt)	DELAY(10 * (1 << (irt)))

/* XXX */
#define abs(x)	((x) < 0 ? -(x) : (x))

int
amd0f_set_fidvid(const struct amd0f_fidvid *fv, const struct amd0f_xsit *xsit)
{
	uint32_t val, cfid, cvid, tvid;
	uint64_t status;

	/*
	 * We don't wait for the change-pending bit here; we only need
	 * to make sure that it isn't stuck.
	 */
	status = rdmsr(AMD0F_MSR_FIDVID_STATUS);
	if (AMD0F_STA_PENDING(status))
		return EBUSY;

	cfid = AMD0F_STA_CFID(status);
	cvid = AMD0F_STA_CVID(status);
	if (fv->fid == cfid && fv->vid == cvid)
		return 0;
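
	/*
	 * A FID/VID transition is performed in three phases, matching
	 * the sequence the hardware expects: adjust the voltage first,
	 * then step the frequency, then settle the final voltage.
	 */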

	/*
	 * Phase 1: Raise core voltage to the TargetVID
	 */
	if ((fv->fid & ~0x1) > (cfid & ~0x1) || cvid > fv->vid) {
		KKASSERT(fv->vid >= xsit->rvo);
		tvid = fv->vid - xsit->rvo;
	} else {
		KKASSERT(cvid >= xsit->rvo);
		tvid = cvid - xsit->rvo;
	}
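
	/*
	 * Step the current VID toward the target in (1 << mvs)-sized
	 * decrements, waiting for each write to complete and for the
	 * voltage to stabilize before the next step.
	 */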
	while (cvid > tvid) {
		if (cvid > (1 << xsit->mvs))
			val = cvid - (1 << xsit->mvs);
		else
			val = 0;
		AMD0F_WRITE_FIDVID(cfid, val, 0ULL);
		AMD0F_WAIT_FIDVID_CHG(status);
		cvid = AMD0F_STA_CVID(status);
		AMD0F_DELAY_VST(xsit->vst);
	}

	/*
	 * Phase 2: Change to requested core frequency
	 */
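	/*
	 * Walk cfid toward the target FID; the control field carries
	 * the PLL lock time (apparently converted from microseconds to
	 * 5ns units by the * 1000 / 5 expression below).
	 */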
	if (cfid != fv->fid) {
		/* NOTE: Keep type as int, else following 'abs' will break */
		int vco_fid, vco_cfid;

		vco_fid = AMD0F_FID2VCO(fv->fid);
		vco_cfid = AMD0F_FID2VCO(cfid);
		while (abs(vco_fid - vco_cfid) > 2) {
			if (fv->fid > cfid) {
				if (cfid > 6)
					val = cfid + 2;
				else
					val = AMD0F_FID2VCO(cfid) + 2;
			} else {
				KKASSERT(cfid >= 2);
				val = cfid - 2;
			}
			AMD0F_WRITE_FIDVID(val, cvid,
			    (uint64_t)xsit->pll_time * 1000 / 5);
			AMD0F_WAIT_FIDVID_CHG(status);
			cfid = AMD0F_STA_CFID(status);
			AMD0F_DELAY_IRT(xsit->irt);
			vco_cfid = AMD0F_FID2VCO(cfid);
		}
		if (cfid != fv->fid) {
			AMD0F_WRITE_FIDVID(fv->fid, cvid,
			    (uint64_t)xsit->pll_time * 1000 / 5);
			AMD0F_WAIT_FIDVID_CHG(status);
			cfid = AMD0F_STA_CFID(status);
			AMD0F_DELAY_IRT(xsit->irt);
		}
	}

	/*
	 * Phase 3: Change to requested voltage
	 */
	if (cvid != fv->vid) {
		AMD0F_WRITE_FIDVID(cfid, fv->vid, 0ULL);
		AMD0F_WAIT_FIDVID_CHG(status);
		cvid = AMD0F_STA_CVID(status);
		AMD0F_DELAY_VST(xsit->vst);
	}
	return 0;
}
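
/*
 * Snapshot the current FID/VID pair; fails with EBUSY while a
 * transition is still pending.
 */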
int
amd0f_get_fidvid(struct amd0f_fidvid *fv)
{
	uint64_t status;

	status = rdmsr(AMD0F_MSR_FIDVID_STATUS);
	if (AMD0F_STA_PENDING(status))
		return EBUSY;

	fv->fid = AMD0F_STA_CFID(status);
	fv->vid = AMD0F_STA_CVID(status);
	return 0;
}
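
/*
 * Derive the minimum and maximum FID/VID operating points from the
 * startup and maximum fields of the status MSR.  The max_fid == 0x2a
 * case is special-cased; note that the raw maximum VID is offset by
 * two in both branches.
 */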
void
amd0f_fidvid_limit(struct amd0f_fidvid *fv_min, struct amd0f_fidvid *fv_max)
{
	uint32_t max_fid, max_vid, start_fid, start_vid;
	uint64_t status;

	status = rdmsr(AMD0F_MSR_FIDVID_STATUS);

	start_fid = AMD0F_STA_SFID(status);
	max_fid = AMD0F_STA_MFID(status);
	start_vid = AMD0F_STA_SVID(status);
	max_vid = AMD0F_STA_MVID(status);

	if (max_fid == 0x2a && max_vid != 0) {
		fv_max->fid = start_fid + 0xa;
		fv_max->vid = max_vid + 0x2;
		fv_min->fid = 0x2;
		fv_min->vid = start_vid;
	} else {
		fv_max->fid = max_fid;
		fv_max->vid = max_vid + 0x2;
		fv_min->fid = start_fid;
		fv_min->vid = start_vid;
	}
}