/* calibrate.c: default delay calibration
 *
 * Excised from init/main.c
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/percpu.h>
unsigned long lpj_fine;
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
{
        preset_lpj = simple_strtoul(str, NULL, 0);
        return 1;
}

__setup("lpj=", lpj_setup);
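/*
 * Usage note (added illustration, not part of the original file): booting
 * with "lpj=<value>", typically the lpj number printed in a previous boot's
 * BogoMIPS line, fills preset_lpj so calibrate_delay() below can skip the
 * measurement entirely.
 */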
#ifdef ARCH_HAS_READ_CURRENT_TIMER

/* This routine uses the read_current_timer() routine and gets the
 * loops per jiffy directly, instead of guessing it using delay().
 * Also, this code tries to handle non-maskable asynchronous events
 * (like SMIs).
 */
#define DELAY_CALIBRATION_TICKS		((HZ < 100) ? 1 : (HZ/100))
#define MAX_DIRECT_CALIBRATION_RETRIES	5
static unsigned long calibrate_delay_direct(void)
{
        unsigned long pre_start, start, post_start;
        unsigned long pre_end, end, post_end;
        unsigned long start_jiffies;
        unsigned long timer_rate_min, timer_rate_max;
        unsigned long good_timer_sum = 0;
        unsigned long good_timer_count = 0;
        unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES];
        int max = -1; /* index of measured_times with max/min values or not set */
        int min = -1;
        int i;

        if (read_current_timer(&pre_start) < 0)
                return 0;
        /*
         * A simple loop like
         *	while (jiffies < start_jiffies + 1)
         *		start = read_current_timer();
         * will not do. As we don't really know whether jiffy switch
         * happened first or timer_value was read first. And some asynchronous
         * event can happen between these two events introducing errors in lpj.
         *
         * So, we do
         * 1. pre_start <- When we are sure that jiffy switch hasn't happened
         * 2. check jiffy switch
         * 3. start <- timer value before or after jiffy switch
         * 4. post_start <- When we are sure that jiffy switch has happened
         *
         * Note, we don't know anything about order of 2 and 3.
         * Now, by looking at post_start and pre_start difference, we can
         * check whether any asynchronous event happened or not.
         */
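        /*
         * The same trick is used at the end of the measurement window
         * (pre_end, end, post_end). Since pre_start is taken strictly before
         * the first tick and post_end strictly after the last one,
         * (post_end - pre_start) can only overestimate the timer rate, while
         * (pre_end - post_start) can only underestimate it; a long
         * asynchronous event pushes the two bounds apart and gets the sample
         * rejected below.
         */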
        for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
                pre_start = 0;
                read_current_timer(&start);
                start_jiffies = jiffies;
                while (time_before_eq(jiffies, start_jiffies + 1)) {
                        pre_start = start;
                        read_current_timer(&start);
                }
                read_current_timer(&post_start);

                pre_end = 0;
                end = post_start;
                while (time_before_eq(jiffies, start_jiffies + 1 +
                                        DELAY_CALIBRATION_TICKS)) {
                        pre_end = end;
                        read_current_timer(&end);
                }
                read_current_timer(&post_end);

                timer_rate_max = (post_end - pre_start) /
                                        DELAY_CALIBRATION_TICKS;
                timer_rate_min = (pre_end - post_start) /
                                        DELAY_CALIBRATION_TICKS;

                /*
                 * If the upper limit and lower limit of the timer_rate is
                 * >= 12.5% apart, redo calibration.
                 */
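                /*
                 * (timer_rate_max >> 3) is timer_rate_max / 8, i.e. 12.5%,
                 * which is the tolerance applied in the check below.
                 */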
                if (start >= post_end)
                        printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
                                        "timer_rate as we had a TSC wrap around"
                                        " start=%lu >=post_end=%lu\n",
                                start, post_end);
                if (start < post_end && pre_start != 0 && pre_end != 0 &&
                    (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
                        good_timer_count++;
                        good_timer_sum += timer_rate_max;
                        measured_times[i] = timer_rate_max;
                        if (max < 0 || timer_rate_max > measured_times[max])
                                max = i;
                        if (min < 0 || timer_rate_max < measured_times[min])
                                min = i;
                } else
                        measured_times[i] = 0;
        }
        /*
         * Find the maximum & minimum - if they differ too much throw out the
         * one with the largest difference from the mean and try again...
         */
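        /*
         * Each pass recomputes the mean of the surviving samples and, if the
         * spread is still too wide, zeroes out whichever extreme lies farther
         * from that mean, until either the spread is acceptable or fewer than
         * two good samples remain.
         */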
        while (good_timer_count > 1) {
                unsigned long estimate;
                unsigned long maxdiff;

                /* compute the estimate */
                estimate = (good_timer_sum/good_timer_count);
                maxdiff = estimate >> 3;

                /* if range is within 12% let's take it */
                if ((measured_times[max] - measured_times[min]) < maxdiff)
                        return estimate;

                /* ok - drop the worse value and try again... */
                good_timer_sum = 0;
                good_timer_count = 0;
                if ((measured_times[max] - estimate) <
                                (estimate - measured_times[min])) {
                        printk(KERN_NOTICE "calibrate_delay_direct() dropping "
                                        "min bogoMips estimate %d = %lu\n",
                                min, measured_times[min]);
                        measured_times[min] = 0;
                        min = max;
                } else {
                        printk(KERN_NOTICE "calibrate_delay_direct() dropping "
                                        "max bogoMips estimate %d = %lu\n",
                                max, measured_times[max]);
                        measured_times[max] = 0;
                        max = min;
                }

                for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
                        if (measured_times[i] == 0)
                                continue;
                        good_timer_count++;
                        good_timer_sum += measured_times[i];
                        if (measured_times[i] < measured_times[min])
                                min = i;
                        if (measured_times[i] > measured_times[max])
                                max = i;
                }
        }

        printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good "
               "estimate for loops_per_jiffy.\nProbably due to long platform "
               "interrupts. Consider using \"lpj=\" boot option.\n");
        return 0;
}
#else
static unsigned long calibrate_delay_direct(void)
{
        return 0;
}
#endif

/*
 * This is the number of bits of precision for the loops_per_jiffy.  Each
 * time we refine our estimate after the first takes 1.5/HZ seconds, so try
 * to start with a good estimate.
 * For the boot cpu we can skip the delay calibration and assign it a value
 * calculated based on the timer frequency.
 * For the rest of the CPUs we cannot assume that the timer frequency is same as
 * the cpu frequency, hence do the calibration for those.
 */
#define LPS_PREC 8
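/*
 * With LPS_PREC at 8, the binary search in calibrate_delay_converge() keeps
 * refining lpj until the remaining step is below lpj/256, so the result is
 * accurate to better than roughly 0.4%.
 */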
static unsigned long calibrate_delay_converge(void)
{
        /* First stage - slowly accelerate to find initial bounds */
        unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit;
        int trials = 0, band = 0, trial_in_band = 0;

        lpj = (1<<12);

        /* wait for "start of" clock tick */
        ticks = jiffies;
        while (ticks == jiffies)
                ; /* nothing */
        /* Go .. */
        ticks = jiffies;
        do {
                if (++trial_in_band == (1<<band)) {
                        ++band;
                        trial_in_band = 0;
                }
                __delay(lpj * band);
                trials += band;
        } while (ticks == jiffies);
        /*
         * We overshot, so retreat to a clear underestimate. Then estimate
         * the largest likely undershoot. This defines our chop bounds.
         */
        trials -= band;
        loopadd_base = lpj * band;
        lpj_base = lpj * trials;
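        /*
         * lpj * trials loops all completed inside a single tick (the final,
         * overshooting band has been subtracted), so lpj_base is a clear
         * underestimate, while loopadd_base = lpj * band is the largest
         * amount we may still be short by; these two values seed the binary
         * approximation below.
         */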
recalibrate:
        lpj = lpj_base;
        loopadd = loopadd_base;

        /*
         * Do a binary approximation to get lpj set to
         * equal one clock (up to LPS_PREC bits)
         */
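        /*
         * Each pass tentatively adds loopadd to lpj, waits for a fresh tick
         * edge, runs __delay(lpj), and keeps the addition only if the delay
         * still completed within one tick; loopadd is then halved, so lpj
         * converges on the loops-per-jiffy value from below.
         */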
        chop_limit = lpj >> LPS_PREC;
        while (loopadd > chop_limit) {
                lpj += loopadd;
                ticks = jiffies;
                while (ticks == jiffies)
                        ; /* nothing */
                ticks = jiffies;
                __delay(lpj);
                if (jiffies != ticks)	/* longer than 1 tick */
                        lpj -= loopadd;
                loopadd >>= 1;
        }
        /*
         * If we incremented every single time possible, presume we've
         * massively underestimated initially, and retry with a higher
         * start, and larger range. (Only seen on x86_64, due to SMIs)
         */
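        /*
         * Because loopadd halves on every pass, adding it every single time
         * leaves lpj at lpj_base + 2 * (loopadd_base - loopadd), ignoring
         * truncation, which is what the equality below detects.
         */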
        if (lpj + loopadd * 2 == lpj_base + loopadd_base * 2) {
                lpj_base = lpj;
                loopadd_base <<= 2;
                goto recalibrate;
        }

        return lpj;
}

static DEFINE_PER_CPU(unsigned long, cpu_loops_per_jiffy) = { 0 };

/*
 * Check if cpu calibration delay is already known. For example,
 * some processors with multi-core sockets may have all cores
 * with the same calibration delay.
 *
 * Architectures should override this function if a faster calibration
 * method is available.
 */
unsigned long __attribute__((weak)) calibrate_delay_is_known(void)
{
        return 0;
}
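/*
 * For example (an illustration, not something this file implements), an
 * architecture whose cores all run from one constant-rate timer could
 * override this to return the loops_per_jiffy value already calibrated on a
 * sibling CPU instead of re-running the delay loop on every core.
 */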
void calibrate_delay(void)
{
        unsigned long lpj;
        static bool printed;
        int this_cpu = smp_processor_id();

        if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
                lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);
                if (!printed)
                        pr_info("Calibrating delay loop (skipped) "
                                "already calibrated this CPU");
        } else if (preset_lpj) {
                lpj = preset_lpj;
                if (!printed)
                        pr_info("Calibrating delay loop (skipped) "
                                "preset value.. ");
        } else if ((!printed) && lpj_fine) {
                lpj = lpj_fine;
                pr_info("Calibrating delay loop (skipped), "
                        "value calculated using timer frequency.. ");
        } else if ((lpj = calibrate_delay_is_known())) {
                ;
        } else if ((lpj = calibrate_delay_direct()) != 0) {
                if (!printed)
                        pr_info("Calibrating delay using timer "
                                "specific routine.. ");
        } else {
                if (!printed)
                        pr_info("Calibrating delay loop... ");
                lpj = calibrate_delay_converge();
        }
        per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;
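        /*
         * lpj/(500000/HZ) works out to lpj * HZ / 500000, the conventional
         * BogoMIPS integer part, and (lpj/(5000/HZ)) % 100 supplies its two
         * decimal places.
         */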
        if (!printed)
                pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
                        lpj/(500000/HZ),
                        (lpj/(5000/HZ)) % 100, lpj);

        printed = true;

        loops_per_jiffy = lpj;
}