 * NTP client/server, based on OpenNTPD 3.9p1
 *
 * Busybox port author: Adam Tkac (C) 2009 <vonsch@gmail.com>
 *
 * OpenNTPd 3.9p1 copyright holders:
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
 *
 * OpenNTPd code is licensed under ISC-style licence:
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER
 * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 ***********************************************************************
 *
 * Parts of the OpenNTPD clock synchronization code are replaced by
 * code which is based on ntp-4.2.6, which carries the following
 * copyright notice:
 *
 * Copyright (c) University of Delaware 1992-2009
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose with or without fee is hereby
 * granted, provided that the above copyright notice appears in all
 * copies and that both the copyright notice and this permission
 * notice appear in supporting documentation, and that the name
 * University of Delaware not be used in advertising or publicity
 * pertaining to distribution of the software without specific,
 * written prior permission. The University of Delaware makes no
 * representations about the suitability this software for any
 * purpose. It is provided "as is" without express or implied warranty.
 ***********************************************************************
//usage:#define ntpd_trivial_usage
//usage:	"[-dnqNw"IF_FEATURE_NTPD_SERVER("l -I IFACE")"] [-S PROG] [-p PEER]..."
//usage:#define ntpd_full_usage "\n\n"
//usage:	"NTP client/server\n"
//usage:	"\n	-d	Verbose"
//usage:	"\n	-n	Do not daemonize"
//usage:	"\n	-q	Quit after clock is set"
//usage:	"\n	-N	Run at high priority"
//usage:	"\n	-w	Do not set time (only query peers), implies -n"
//usage:	"\n	-S PROG	Run PROG after stepping time, stratum change, and every 11 mins"
//usage:	"\n	-p PEER	Obtain time from PEER (may be repeated)"
//usage:	IF_FEATURE_NTPD_CONF(
//usage:	"\n		If -p is not given, 'server HOST' lines"
//usage:	"\n		from /etc/ntp.conf are used"
//usage:	)
//usage:	IF_FEATURE_NTPD_SERVER(
//usage:	"\n	-l	Also run as server on port 123"
//usage:	"\n	-I IFACE	Bind server to IFACE, implies -l"
//usage:	)

// -l and -p options are not compatible with "standard" ntpd:
// it has them as "-l logfile" and "-p pidfile".
// -S and -w are not compat either, "standard" ntpd has no such opts.
#include <netinet/ip.h> /* For IPTOS_LOWDELAY definition */
#include <sys/resource.h> /* setpriority */
#include <sys/timex.h>
#ifndef IPTOS_LOWDELAY
# define IPTOS_LOWDELAY 0x10
#endif

/* Verbosity control (max level of -dddd options accepted).
 * max 6 is very talkative (and bloated). 3 is non-bloated,
 * production level setting.
 */
/* High-level description of the algorithm:
 *
 * We start running with very small poll_exp, BURSTPOLL,
 * in order to quickly accumulate INITIAL_SAMPLES datapoints
 * for each peer. Then, time is stepped if the offset is larger
 * than STEP_THRESHOLD, otherwise it isn't; anyway, we enlarge
 * poll_exp to MINPOLL and enter frequency measurement step:
 * we collect new datapoints but ignore them for WATCH_THRESHOLD
 * seconds. After WATCH_THRESHOLD seconds we look at accumulated
 * offset and estimate frequency drift.
 *
 * (frequency measurement step seems to not be strictly needed,
 * it is conditionally disabled with USING_INITIAL_FREQ_ESTIMATION)
 *
 * After this, we enter "steady state": we collect a datapoint,
 * we select the best peer, if this datapoint is not a new one
 * (IOW: if this datapoint isn't for selected peer), sleep
 * and collect another one; otherwise, use its offset to update
 * frequency drift, if offset is somewhat large, reduce poll_exp,
 * otherwise increase poll_exp.
 *
 * If offset is larger than STEP_THRESHOLD, which shouldn't normally
 * happen, we assume that something "bad" happened (computer
 * was hibernated, someone set totally wrong date, etc),
 * then the time is stepped, all datapoints are discarded,
 * and we go back to steady state.
 *
 * Made some changes to speed up re-syncing after our clock goes bad
 * (tested with suspending my laptop):
 * - if largish offset (>= STEP_THRESHOLD == 1 sec) is seen
 *   from a peer, schedule next query for this peer soon
 *   without drastically lowering poll interval for everybody.
 *   This makes us collect enough data for step much faster:
 *   e.g. at poll = 10 (1024 secs), step was done within 5 minutes
 *   after the first reply which indicated that our clock is 14 seconds off.
 * - on step, do not discard d_dispersion data of the existing datapoints,
 *   do not clear reachable_bits. This prevents discarding the first ~8
 *   datapoints after the step.
 */
#define INITIAL_SAMPLES    4	/* how many samples do we want for init */
#define BAD_DELAY_GROWTH   4	/* drop packet if its delay grew by more than this */

#define RETRY_INTERVAL    32	/* on send/recv error, retry in N secs (need to be power of 2) */
#define NOREPLY_INTERVAL 512	/* sent, but got no reply: cap next query by this many seconds */
#define RESPONSE_INTERVAL 16	/* wait for reply up to N secs */

/* Step threshold (sec). std ntpd uses 0.128.
 */
#define STEP_THRESHOLD     1
/* Slew threshold (sec): adjtimex() won't accept offsets larger than this.
 * Using exact power of 2 (1/8) results in smaller code
 */
#define SLEW_THRESHOLD 0.125
/* Stepout threshold (sec). std ntpd uses 900 (11 mins (!)) */
#define WATCH_THRESHOLD  128
/* NB: set WATCH_THRESHOLD to ~60 when debugging to save time */
//UNUSED: #define PANIC_THRESHOLD 1000	/* panic threshold (sec) */

/*
 * If we got |offset| > BIGOFF from a peer, cap next query interval
 * for this peer by this many seconds:
 */
#define BIGOFF          STEP_THRESHOLD
#define BIGOFF_INTERVAL (1 << 7)	/* 128 s */

#define FREQ_TOLERANCE  0.000015	/* frequency tolerance (15 PPM) */
#define BURSTPOLL       0	/* initial poll */
#define MINPOLL         5	/* minimum poll interval. std ntpd uses 6 (6: 64 sec) */
/*
 * If offset > discipline_jitter * POLLADJ_GATE, and poll interval is > 2^BIGPOLL,
 * then it is decreased _at once_. (If <= 2^BIGPOLL, it will be decreased _eventually_).
 */
#define BIGPOLL         9	/* 2^9 sec ~= 8.5 min */
#define MAXPOLL         12	/* maximum poll interval (12: 1.1h, 17: 36.4h). std ntpd uses 17 */
/*
 * Actively lower poll when we see such big offsets.
 * With SLEW_THRESHOLD = 0.125, it means we try to sync more aggressively
 * if offset increases over ~0.04 sec
 */
//#define POLLDOWN_OFFSET (SLEW_THRESHOLD / 3)
#define MINDISP         0.01	/* minimum dispersion (sec) */
#define MAXDISP         16	/* maximum dispersion (sec) */
#define MAXSTRAT        16	/* maximum stratum (infinity metric) */
#define MAXDIST         1	/* distance threshold (sec) */
#define MIN_SELECTED    1	/* minimum intersection survivors */
#define MIN_CLUSTERED   3	/* minimum cluster survivors */

#define MAXDRIFT        0.000500	/* frequency drift we can correct (500 PPM) */

/* Poll-adjust threshold.
 * When we see that offset is small enough compared to discipline jitter,
 * we grow a counter: += MINPOLL. When counter goes over POLLADJ_LIMIT,
 * we poll_exp++. If offset isn't small, counter -= poll_exp*2,
 * and when it goes below -POLLADJ_LIMIT, we poll_exp--.
 * (Bumped from 30 to 40 since otherwise I often see poll_exp going *2* steps down)
 */
#define POLLADJ_LIMIT   40
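/* For illustration (using the constants above, not a trace from the original
 * source): nine consecutive "small offset" updates add 9 * MINPOLL = 45 to the
 * counter, which exceeds POLLADJ_LIMIT and bumps poll_exp by one; at
 * poll_exp = 10 a single "large offset" update subtracts poll_exp*2 = 20 again.
 */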
/* If offset < discipline_jitter * POLLADJ_GATE, then we decide to increase
 * poll interval (we think we can't improve timekeeping
 * by staying at smaller poll).
 */
#define POLLADJ_GATE    4
#define TIMECONST_HACK_GATE 2
/* Compromise Allan intercept (sec). doc uses 1500, std ntpd uses 512 */

/* FLL loop gain [why it depends on MAXPOLL??] */
#define FLL (MAXPOLL + 1)
/* Parameter averaging constant */
	NTP_MSGSIZE_NOAUTH = 48,
	NTP_MSGSIZE        = (NTP_MSGSIZE_NOAUTH + 4 + NTP_DIGESTSIZE),

	MODE_MASK    = (7 << 0),
	VERSION_MASK = (7 << 3),

	/* Leap Second Codes (high order two bits of m_status) */
	LI_NOWARNING = (0 << 6),	/* no warning */
	LI_PLUSSEC   = (1 << 6),	/* add a second (61 seconds) */
	LI_MINUSSEC  = (2 << 6),	/* minus a second (59 seconds) */
	LI_ALARM     = (3 << 6),	/* alarm condition */

	MODE_RES0      = 0,	/* reserved */
	MODE_SYM_ACT   = 1,	/* symmetric active */
	MODE_SYM_PAS   = 2,	/* symmetric passive */
	MODE_CLIENT    = 3,	/* client */
	MODE_SERVER    = 4,	/* server */
	MODE_BROADCAST = 5,	/* broadcast */
	MODE_RES1      = 6,	/* reserved for NTP control message */
	MODE_RES2      = 7,	/* reserved for private use */

//TODO: better base selection
#define OFFSET_1900_1970 2208988800UL	/* 1970 - 1900 in seconds */

#define NUM_DATAPOINTS 8
	uint8_t     m_status;	/* status of local clock and leap info */
	uint8_t     m_ppoll;	/* poll value */
	int8_t      m_precision_exp;
	s_fixedpt_t m_rootdelay;
	s_fixedpt_t m_rootdisp;
	l_fixedpt_t m_reftime;
	l_fixedpt_t m_orgtime;
	l_fixedpt_t m_rectime;
	l_fixedpt_t m_xmttime;
	uint8_t     m_digest[NTP_DIGESTSIZE];

	len_and_sockaddr *p_lsa;
	uint32_t    lastpkt_refid;
	uint8_t     lastpkt_status;
	uint8_t     lastpkt_stratum;
	uint8_t     reachable_bits;
	/* when to send new query (if p_fd == -1)
	 * or when receive times out (if p_fd >= 0): */
	double      next_action_time;
	/* p_raw_delay is set even by "high delay" packets */
	/* lastpkt_delay isn't */
	double      lastpkt_recv_time;
	double      lastpkt_delay;
	double      lastpkt_rootdelay;
	double      lastpkt_rootdisp;
	/* produced by filter algorithm: */
	double      filter_offset;
	double      filter_dispersion;
	double      filter_jitter;
	datapoint_t filter_datapoint[NUM_DATAPOINTS];
	/* last sent packet: */
#define USING_KERNEL_PLL_LOOP          1
#define USING_INITIAL_FREQ_ESTIMATION  0

	/* Insert new options above this line. */
	/* Non-compat options: */
	OPT_l = (1 << 7) * ENABLE_FEATURE_NTPD_SERVER,
	OPT_I = (1 << 8) * ENABLE_FEATURE_NTPD_SERVER,
	/* We hijack some bits for other purposes */
	/* total round trip delay to currently selected reference clock */
	/* reference timestamp: time when the system clock was last set or corrected */
	/* total dispersion to currently selected reference clock */
	double   last_script_run;
#if ENABLE_FEATURE_NTPD_SERVER
# define G_listen_fd (G.listen_fd)
#else
# define G_listen_fd (-1)
#endif
	/* refid: 32-bit code identifying the particular server or reference clock
	 * in stratum 0 packets this is a four-character ASCII string,
	 * called the kiss code, used for debugging and monitoring
	 * in stratum 1 packets this is a four-character ASCII string
	 * assigned to the reference clock by IANA. Example: "GPS "
	 * in stratum 2+ packets, it's IPv4 address or 4 first bytes
	 * of MD5 hash of IPv6
	 */
	/* precision is defined as the larger of the resolution and time to
	 * read the clock, in log2 units. For instance, the precision of a
	 * mains-frequency clock incrementing at 60 Hz is 16 ms, even when the
	 * system clock hardware representation is to the nanosecond.
	 *
	 * Delays, jitters of various kinds are clamped down to precision.
	 *
	 * If precision_sec is too large, discipline_jitter gets clamped to it
	 * and if offset is smaller than discipline_jitter * POLLADJ_GATE, poll
	 * interval grows even though we really can benefit from staying at a
	 * smaller one, collecting non-lagged datapoints and correcting offset.
	 * (Lagged datapoints exist when poll_exp is large but we still have
	 * systematic offset error - the time distance between datapoints
	 * is significant and older datapoints have smaller offsets.
	 * This makes our offset estimation a bit smaller than reality)
	 * Due to this effect, setting G_precision_sec close to
	 * STEP_THRESHOLD isn't such a good idea - offsets may grow
	 * too big and we will step. I observed it with -6.
	 *
	 * OTOH, setting precision_sec far too small would result in futile
	 * attempts to synchronize to an unachievable precision.
	 *
	 * -6 is 1/64 sec, -7 is 1/128 sec and so on.
	 * -8 is 1/256 ~= 0.003906 (worked well for me --vda)
	 * -9 is 1/512 ~= 0.001953 (let's try this for some time)
	 */
#define G_precision_exp  -9
	/*
	 * G_precision_exp is used only for constructing outgoing packets.
	 * It's ok to set G_precision_sec to a slightly different value
	 * (one which is "nicer looking" in logs).
	 * Exact value would be (1.0 / (1 << (- G_precision_exp))):
	 */
#define G_precision_sec  0.002
#define STATE_NSET      0	/* initial state, "nothing is set" */
//#define STATE_FSET    1	/* frequency set from file */
//#define STATE_SPIK    2	/* spike detected */
//#define STATE_FREQ    3	/* initial frequency */
#define STATE_SYNC      4	/* clock synchronized (normal operation) */
	uint8_t  discipline_state;	// doc calls it c.state
	uint8_t  poll_exp;		// s.poll
	int      polladj_count;		// c.count
	long     kernel_freq_drift;
	peer_t   *last_update_peer;
	double   last_update_offset;	// c.last
	double   last_update_recv_time;	// s.t
	double   discipline_jitter;	// c.jitter
	/* Since we only compare it with ints, can simplify code
	 * by not making this variable floating point:
	 */
	unsigned offset_to_jitter_ratio;
	//double cluster_offset;	// s.offset
	//double cluster_jitter;	// s.jitter
#if !USING_KERNEL_PLL_LOOP
	double   discipline_freq_drift;	// c.freq
	/* Maybe conditionally calculate wander? it's used only for logging */
	double   discipline_wander;	// c.wander
#endif

#define G (*ptr_to_globals)

#define VERB1 if (MAX_VERBOSE && G.verbose)
#define VERB2 if (MAX_VERBOSE >= 2 && G.verbose >= 2)
#define VERB3 if (MAX_VERBOSE >= 3 && G.verbose >= 3)
#define VERB4 if (MAX_VERBOSE >= 4 && G.verbose >= 4)
#define VERB5 if (MAX_VERBOSE >= 5 && G.verbose >= 5)
#define VERB6 if (MAX_VERBOSE >= 6 && G.verbose >= 6)
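/* Usage illustration: a statement like "VERB3 bb_error_msg(...)" expands to
 * "if (MAX_VERBOSE >= 3 && G.verbose >= 3) bb_error_msg(...)", so messages
 * above the compiled-in MAX_VERBOSE level are removed at compile time and
 * the rest are gated by the run-time -d count kept in G.verbose.
 */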
static double LOG2D(int a)
	if (a < 0)
		return 1.0 / (1UL << -a);
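/* For illustration: LOG2D(-9) == 1.0 / (1 << 9) == 1/512 ~= 0.001953 sec,
 * which is how a peer's m_precision_exp field is converted to seconds
 * further down in recv_and_process_peer_pkt().
 */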
static ALWAYS_INLINE
double SQUARE(double x)

static ALWAYS_INLINE
double MAXD(double a, double b)

static ALWAYS_INLINE
double MIND(double a, double b)

static NOINLINE
double my_SQRT(double X)
	double Xhalf = X * 0.5;

	/* Fast and good approximation to 1/sqrt(X), black magic */
	/*v.i = 0x5f3759df - (v.i >> 1);*/
	v.i = 0x5f375a86 - (v.i >> 1); /* - this constant is slightly better */
	invsqrt = v.f; /* better than 0.2% accuracy */

	/* Refining it using Newton's method: x1 = x0 - f(x0)/f'(x0)
	 * f(x) = 1/(x*x) - X  (f==0 when x = 1/sqrt(X))
	 * f(x)/f'(x) = (X - 1/(x*x)) / (2/(x*x*x)) = X*x*x*x/2 - x/2
	 * x1 = x0 - (X*x0*x0*x0/2 - x0/2) = 1.5*x0 - X*x0*x0*x0/2 = x0*(1.5 - (X/2)*x0*x0)
	 */
	invsqrt = invsqrt * (1.5 - Xhalf * invsqrt * invsqrt); /* ~0.05% accuracy */
	/* invsqrt = invsqrt * (1.5 - Xhalf * invsqrt * invsqrt); 2nd iter: ~0.0001% accuracy */
	/* With 4 iterations, more than half results will be exact,
	 * at the 6th iteration the result stabilizes with about 72% results exact.
	 * We are well satisfied with 0.05% accuracy.
	 */

	return X * invsqrt; /* X * 1/sqrt(X) ~= sqrt(X) */

static ALWAYS_INLINE
double SQRT(double X)
	/* If this arch doesn't use IEEE 754 floats, fall back to using libm */
	if (sizeof(float) != 4)
		return sqrt(X);

	/* This avoids needing libm, saves about 0.5k on x86-32 */
	return my_SQRT(X);

	gettimeofday(&tv, NULL); /* never fails */
	G.cur_time = tv.tv_sec + (1.0e-6 * tv.tv_usec) + OFFSET_1900_1970;
d_to_tv(double d, struct timeval *tv)
	tv->tv_sec = (long)d;
	tv->tv_usec = (d - tv->tv_sec) * 1000000;
lfp_to_d(l_fixedpt_t lfp)
	lfp.int_partl = ntohl(lfp.int_partl);
	lfp.fractionl = ntohl(lfp.fractionl);
	ret = (double)lfp.int_partl + ((double)lfp.fractionl / UINT_MAX);

sfp_to_d(s_fixedpt_t sfp)
	sfp.int_parts = ntohs(sfp.int_parts);
	sfp.fractions = ntohs(sfp.fractions);
	ret = (double)sfp.int_parts + ((double)sfp.fractions / USHRT_MAX);
#if ENABLE_FEATURE_NTPD_SERVER
	lfp.int_partl = (uint32_t)d;
	lfp.fractionl = (uint32_t)((d - lfp.int_partl) * UINT_MAX);
	lfp.int_partl = htonl(lfp.int_partl);
	lfp.fractionl = htonl(lfp.fractionl);

	sfp.int_parts = (uint16_t)d;
	sfp.fractions = (uint16_t)((d - sfp.int_parts) * USHRT_MAX);
	sfp.int_parts = htons(sfp.int_parts);
	sfp.fractions = htons(sfp.fractions);
dispersion(const datapoint_t *dp)
	return dp->d_dispersion + FREQ_TOLERANCE * (G.cur_time - dp->d_recv_time);

root_distance(peer_t *p)
	/* The root synchronization distance is the maximum error due to
	 * all causes of the local clock relative to the primary server.
	 * It is defined as half the total delay plus total dispersion
	 * plus peer jitter.
	 */
	return MAXD(MINDISP, p->lastpkt_rootdelay + p->lastpkt_delay) / 2
		+ p->lastpkt_rootdisp
		+ p->filter_dispersion
		+ FREQ_TOLERANCE * (G.cur_time - p->lastpkt_recv_time)

set_next(peer_t *p, unsigned t)
	p->next_action_time = G.cur_time + t;

/*
 * Peer clock filter and its helpers
 */
filter_datapoints(peer_t *p)
	/* Simulations have shown that use of *averaged* offset for p->filter_offset
	 * is in fact worse than simply using last received one: with large poll intervals
	 * (>= 2048) averaging code uses offset values which are outdated by hours,
	 * and time/frequency correction goes totally wrong when fed essentially bogus offsets.
	 */
	double minoff, maxoff, w;
	double x = x; /* for compiler */
	double oldest_off = oldest_off;
	double oldest_age = oldest_age;
	double newest_off = newest_off;
	double newest_age = newest_age;
	fdp = p->filter_datapoint;

	minoff = maxoff = fdp[0].d_offset;
	for (i = 1; i < NUM_DATAPOINTS; i++) {
		if (minoff > fdp[i].d_offset)
			minoff = fdp[i].d_offset;
		if (maxoff < fdp[i].d_offset)
			maxoff = fdp[i].d_offset;
	}

	idx = p->datapoint_idx; /* most recent datapoint's index */
	/* Drop two outliers and take weighted average of the rest:
	 * most_recent/2 + older1/4 + older2/8 ... + older5/32 + older6/32
	 * we use older6/32, not older6/64 since sum of weights should be 1:
	 * 1/2 + 1/4 + 1/8 + 1/16 + 1/32 + 1/32 = 1
	 */
	/*
	 * filter_dispersion = SUM_i (dispersion_i / 2^(i+1)),
	 * a weighted sum where the most recent datapoint has the largest weight
	 */
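	/* For illustration: if all 8 datapoints had the same dispersion d,
	 * the weighted sum d/2 + d/4 + ... + d/256 adds up to just under d,
	 * so filter_dispersion stays on the order of one datapoint's dispersion.
	 */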
	for (i = 0; i < NUM_DATAPOINTS; i++) {
			bb_error_msg("datapoint[%d]: off:%f disp:%f(%f) age:%f%s",
				fdp[idx].d_dispersion, dispersion(&fdp[idx]),
				G.cur_time - fdp[idx].d_recv_time,
				(minoff == fdp[idx].d_offset || maxoff == fdp[idx].d_offset)
					? " (outlier by offset)" : ""
		sum += dispersion(&fdp[idx]) / (2 << i);

		if (minoff == fdp[idx].d_offset) {
			minoff -= 1; /* so that we don't match it ever again */
		if (maxoff == fdp[idx].d_offset) {
			oldest_off = fdp[idx].d_offset;
			oldest_age = G.cur_time - fdp[idx].d_recv_time;
				newest_off = oldest_off;
				newest_age = oldest_age;
		idx = (idx - 1) & (NUM_DATAPOINTS - 1);
	}
	p->filter_dispersion = sum;
	wavg += x; /* add another older6/64 to form older6/32 */
	/* Fix systematic underestimation with large poll intervals.
	 * Imagine that we still have a bit of uncorrected drift,
	 * and poll interval is big (say, 100 sec). Offsets form a progression:
	 * 0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 - 0.7 is most recent.
	 * The algorithm above drops 0.0 and 0.7 as outliers,
	 * and then we have this estimation, ~25% off from 0.7:
	 * 0.1/32 + 0.2/32 + 0.3/16 + 0.4/8 + 0.5/4 + 0.6/2 = 0.503125
	 */
	x = oldest_age - newest_age;
		x = newest_age / x; /* in above example, 100 / (600 - 100) */
		if (x < 1) { /* paranoia check */
			x = (newest_off - oldest_off) * x; /* 0.5 * 100/500 = 0.1 */
	p->filter_offset = wavg;
	fdp = p->filter_datapoint;
	idx = p->datapoint_idx; /* most recent datapoint's index */

	/* filter_offset: simply use the most recent value */
	p->filter_offset = fdp[idx].d_offset;

	/*
	 * filter_dispersion = SUM_i (dispersion_i / 2^(i+1))
	 */
	for (i = 0; i < NUM_DATAPOINTS; i++) {
		sum += dispersion(&fdp[idx]) / (2 << i);
		wavg += fdp[idx].d_offset;
		idx = (idx - 1) & (NUM_DATAPOINTS - 1);
	}
	wavg /= NUM_DATAPOINTS;
	p->filter_dispersion = sum;

	/*
	 * filter_jitter = SQRT( (1/n) * SUM_j (avg - offset_j)^2 )
	 * where n is the number of valid datapoints in the filter (n > 1);
	 * if filter_jitter < precision then filter_jitter = precision
	 */
	for (i = 0; i < NUM_DATAPOINTS; i++) {
		sum += SQUARE(wavg - fdp[i].d_offset);
	sum = SQRT(sum / NUM_DATAPOINTS);
	p->filter_jitter = sum > G_precision_sec ? sum : G_precision_sec;

	VERB4 bb_error_msg("filter offset:%+f disp:%f jitter:%f",
			p->filter_dispersion,
reset_peer_stats(peer_t *p, double offset)
	bool small_ofs = fabs(offset) < STEP_THRESHOLD;

	/* Used to set p->filter_datapoint[i].d_dispersion = MAXDISP
	 * and clear reachable bits, but this proved to be too aggressive:
	 * after step (tested with suspending laptop for ~30 secs),
	 * this caused all previous data to be considered invalid,
	 * making us need to collect the full ~8 datapoints per peer
	 * after step in order to start trusting them.
	 * In turn, this was making poll interval decrease even after
	 * step was done. (Poll interval decreases already before step
	 * in this scenario, because we see large offsets and end up with
	 * no good peer to select).
	 */
	for (i = 0; i < NUM_DATAPOINTS; i++) {
			p->filter_datapoint[i].d_recv_time += offset;
			if (p->filter_datapoint[i].d_offset != 0) {
				p->filter_datapoint[i].d_offset -= offset;
				//bb_error_msg("p->filter_datapoint[%d].d_offset %f -> %f",
				//	p->filter_datapoint[i].d_offset + offset,
				//	p->filter_datapoint[i].d_offset);
			p->filter_datapoint[i].d_recv_time = G.cur_time;
			p->filter_datapoint[i].d_offset = 0;
			/*p->filter_datapoint[i].d_dispersion = MAXDISP;*/

		p->lastpkt_recv_time += offset;
		/*p->reachable_bits = 0;*/
		p->lastpkt_recv_time = G.cur_time;

	filter_datapoints(p); /* recalc p->filter_xxx */
	VERB6 bb_error_msg("%s->lastpkt_recv_time=%f", p->p_dotted, p->lastpkt_recv_time);
resolve_peer_hostname(peer_t *p, int loop_on_fail)
	len_and_sockaddr *lsa;

	lsa = host2sockaddr(p->p_hostname, 123);
		/* error message already emitted by host2sockaddr() */
//FIXME: do this to avoid infinite looping on typo in a hostname?
//well... in which case, what is a good value for loop_on_fail?
//		if (--loop_on_fail == 0)

	p->p_dotted = xmalloc_sockaddr2dotted_noport(&lsa->u.sa);
add_peers(const char *s)
	p = xzalloc(sizeof(*p) + strlen(s));
	strcpy(p->p_hostname, s);
	resolve_peer_hostname(p, /*loop_on_fail=*/ 1);

	/* Names like N.<country2chars>.pool.ntp.org are randomly resolved
	 * to a pool of machines. Sometimes different N's resolve to the same IP.
	 * It is not useful to have two peers with same IP. We skip duplicates.
	 */
	for (item = G.ntp_peers; item != NULL; item = item->link) {
		peer_t *pp = (peer_t *) item->data;
		if (strcmp(p->p_dotted, pp->p_dotted) == 0) {
			bb_error_msg("duplicate peer %s (%s)", s, p->p_dotted);

	p->p_xmt_msg.m_status = MODE_CLIENT | (NTP_VERSION << 3);
	p->next_action_time = G.cur_time; /* = set_next(p, 0); */
	reset_peer_stats(p, STEP_THRESHOLD);

	llist_add_to(&G.ntp_peers, p);
		const struct sockaddr *from, const struct sockaddr *to, socklen_t addrlen,
		msg_t *msg, ssize_t len)
		ret = sendto(fd, msg, len, MSG_DONTWAIT, to, addrlen);
		ret = send_to_from(fd, msg, len, MSG_DONTWAIT, to, from, addrlen);
		bb_perror_msg("send failed");
send_query_to_peer(peer_t *p)
	/* Why do we need to bind()?
	 * See what happens when we don't bind:
	 *
	 * socket(PF_INET, SOCK_DGRAM, IPPROTO_IP) = 3
	 * setsockopt(3, SOL_IP, IP_TOS, [16], 4) = 0
	 * gettimeofday({1259071266, 327885}, NULL) = 0
	 * sendto(3, "xxx", 48, MSG_DONTWAIT, {sa_family=AF_INET, sin_port=htons(123), sin_addr=inet_addr("10.34.32.125")}, 16) = 48
	 *   ^^^ we sent it from some source port picked by kernel.
	 * time(NULL) = 1259071266
	 * write(2, "ntpd: entering poll 15 secs\n", 28) = 28
	 * poll([{fd=3, events=POLLIN}], 1, 15000) = 1 ([{fd=3, revents=POLLIN}])
	 * recv(3, "yyy", 68, MSG_DONTWAIT) = 48
	 *   ^^^ this recv will receive packets to any local port!
	 *
	 * Uncomment this and use strace to see it in action:
	 */
#define PROBE_LOCAL_ADDR /* { len_and_sockaddr lsa; lsa.len = LSA_SIZEOF_SA; getsockname(p->query.fd, &lsa.u.sa, &lsa.len); } */

		len_and_sockaddr *local_lsa;

		family = p->p_lsa->u.sa.sa_family;
		p->p_fd = fd = xsocket_type(&local_lsa, family, SOCK_DGRAM);
		/* local_lsa has "null" address and port 0 now.
		 * bind() ensures we have a *particular port* selected by kernel
		 * and remembered in p->p_fd, thus later recv(p->p_fd)
		 * receives only packets sent to this port.
		 */
		xbind(fd, &local_lsa->u.sa, local_lsa->len);
#if ENABLE_FEATURE_IPV6
		if (family == AF_INET)
#endif
			setsockopt_int(fd, IPPROTO_IP, IP_TOS, IPTOS_LOWDELAY);

	/* Emit message _before_ attempted send. Think of very short
	 * roundtrip networks: we need to go back to recv loop ASAP,
	 * to reduce delay. Printing messages after send works against that.
	 */
	VERB1 bb_error_msg("sending query to %s", p->p_dotted);

	/*
	 * Send out a random 64-bit number as our transmit time. The NTP
	 * server will copy said number into the originate field on the
	 * response that it sends us. This is totally legal per the SNTP spec.
	 *
	 * The impact of this is twofold: we no longer send out the current
	 * system time for the world to see (which may aid an attacker), and
	 * it gives us a (not very secure) way of knowing that we're not
	 * getting spoofed by an attacker that can't capture our traffic
	 * but can spoof packets from the NTP server we're communicating with.
	 *
	 * Save the real transmit timestamp locally.
	 */
	p->p_xmt_msg.m_xmttime.int_partl = rand();
	p->p_xmt_msg.m_xmttime.fractionl = rand();
	p->p_xmttime = gettime1900d();

	/* We were doing it only if sendto worked, but
	 * loss of sync detection needs reachable_bits updated
	 * even if sending fails *locally*:
	 * "network is unreachable" because cable was pulled?
	 * We still need to declare "unsync" if this condition persists.
	 */
	p->reachable_bits <<= 1;

	if (do_sendto(p->p_fd, /*from:*/ NULL, /*to:*/ &p->p_lsa->u.sa, /*addrlen:*/ p->p_lsa->len,
			&p->p_xmt_msg, NTP_MSGSIZE_NOAUTH) == -1
		/*
		 * We know that we sent nothing.
		 * We can retry *soon* without fearing
		 * that we are flooding the peer.
		 */
		set_next(p, RETRY_INTERVAL);

	set_next(p, RESPONSE_INTERVAL);
/* Note that there is no provision to prevent several run_scripts
 * to be started in quick succession. In fact, it happens rather often
 * if initial synchronization results in a step.
 * You will see "step" and then "stratum" script runs, sometimes
 * as close as only 0.002 seconds apart.
 * Script should be ready to deal with this.
 */
static void run_script(const char *action, double offset)
	char *env1, *env2, *env3, *env4;

	G.last_script_run = G.cur_time;

	argv[0] = (char*) G.script_name;
	argv[1] = (char*) action;

	VERB1 bb_error_msg("executing '%s %s'", G.script_name, action);

	env1 = xasprintf("%s=%u", "stratum", G.stratum);
	env2 = xasprintf("%s=%ld", "freq_drift_ppm", G.kernel_freq_drift);
	env3 = xasprintf("%s=%u", "poll_interval", 1 << G.poll_exp);
	env4 = xasprintf("%s=%f", "offset", offset);
	/* Other items of potential interest: selected peer,
	 * rootdelay, reftime, rootdisp, refid, ntp_status,
	 * last_update_offset, last_update_recv_time, discipline_jitter,
	 * how many peers have reachable_bits = 0?
	 */

	/* Don't want to wait: it may run hwclock --systohc, and that
	 * may take some time (seconds): */
	/*spawn_and_wait(argv);*/

	unsetenv("freq_drift_ppm");
	unsetenv("poll_interval");
step_time(double offset)
	struct timeval tvc, tvn;
	char buf[sizeof("yyyy-mm-dd hh:mm:ss") + /*paranoia:*/ 4];

	gettimeofday(&tvc, NULL); /* never fails */
	dtime = tvc.tv_sec + (1.0e-6 * tvc.tv_usec) + offset;
	d_to_tv(dtime, &tvn);
	if (settimeofday(&tvn, NULL) == -1)
		bb_perror_msg_and_die("settimeofday");

	strftime_YYYYMMDDHHMMSS(buf, sizeof(buf), &tval);
	bb_error_msg("current time is %s.%06u", buf, (unsigned)tvc.tv_usec);
	strftime_YYYYMMDDHHMMSS(buf, sizeof(buf), &tval);
	bb_error_msg("setting time to %s.%06u (offset %+fs)", buf, (unsigned)tvn.tv_usec, offset);

	/* Correct various fields which contain time-relative values: */

	G.cur_time += offset;
	G.last_update_recv_time += offset;
	G.last_script_run += offset;

	/* p->lastpkt_recv_time, p->next_action_time and such: */
	for (item = G.ntp_peers; item != NULL; item = item->link) {
		peer_t *pp = (peer_t *) item->data;
		reset_peer_stats(pp, offset);
		//bb_error_msg("offset:%+f pp->next_action_time:%f -> %f",
		//	offset, pp->next_action_time, pp->next_action_time + offset);
		pp->next_action_time += offset;
		if (pp->p_fd >= 0) {
			/* We wait for reply from this peer too.
			 * But due to step we are doing, reply's data is no longer
			 * useful (in fact, it'll be bogus). Stop waiting for it.
			 */
			set_next(pp, RETRY_INTERVAL);
static void clamp_pollexp_and_set_MAXSTRAT(void)
	if (G.poll_exp < MINPOLL)
		G.poll_exp = MINPOLL;
	if (G.poll_exp > BIGPOLL)
		G.poll_exp = BIGPOLL;
	G.polladj_count = 0;
	G.stratum = MAXSTRAT;
/*
 * Selection and clustering, and their helpers
 */
	double opt_rd; /* optimization */

compare_point_edge(const void *aa, const void *bb)
	const point_t *a = aa;
	const point_t *b = bb;
	if (a->edge < b->edge) {
	return (a->edge > b->edge);

compare_survivor_metric(const void *aa, const void *bb)
	const survivor_t *a = aa;
	const survivor_t *b = bb;
	if (a->metric < b->metric) {
	return (a->metric > b->metric);

fit(peer_t *p, double rd)
	if ((p->reachable_bits & (p->reachable_bits-1)) == 0) {
		/* One or zero bits in reachable_bits */
		VERB4 bb_error_msg("peer %s unfit for selection: unreachable", p->p_dotted);
#if 0 /* we filter out such packets earlier */
	if ((p->lastpkt_status & LI_ALARM) == LI_ALARM
	 || p->lastpkt_stratum >= MAXSTRAT
		VERB4 bb_error_msg("peer %s unfit for selection: bad status/stratum", p->p_dotted);
	/* rd is root_distance(p) */
	if (rd > MAXDIST + FREQ_TOLERANCE * (1 << G.poll_exp)) {
		VERB4 bb_error_msg("peer %s unfit for selection: root distance too high", p->p_dotted);
//	/* Do we have a loop? */
//	if (p->refid == p->dstaddr || p->refid == s.refid)
select_and_cluster(void)
	int size = 3 * G.peer_cnt;
	/* for selection algorithm */
	point_t point[size];
	unsigned num_points, num_candidates;
	unsigned num_falsetickers;
	/* for cluster algorithm */
	survivor_t survivor[size];
	unsigned num_survivors;

	while (item != NULL) {
		p = (peer_t *) item->data;
		rd = root_distance(p);
		offset = p->filter_offset;
		VERB5 bb_error_msg("interval: [%f %f %f] %s",
		point[num_points].p = p;
		point[num_points].type = -1;
		point[num_points].edge = offset - rd;
		point[num_points].opt_rd = rd;
		point[num_points].p = p;
		point[num_points].type = 0;
		point[num_points].edge = offset;
		point[num_points].opt_rd = rd;
		point[num_points].p = p;
		point[num_points].type = 1;
		point[num_points].edge = offset + rd;
		point[num_points].opt_rd = rd;

	num_candidates = num_points / 3;
	if (num_candidates == 0) {
		VERB3 bb_error_msg("no valid datapoints%s", ", no peer selected");
//TODO: sorting does not seem to be done in reference code
	qsort(point, num_points, sizeof(point[0]), compare_point_edge);

	/* Start with the assumption that there are no falsetickers.
	 * Attempt to find a nonempty intersection interval containing
	 * the midpoints of all truechimers.
	 * If a nonempty interval cannot be found, increase the number
	 * of assumed falsetickers by one and try again.
	 * If a nonempty interval is found and the number of falsetickers
	 * is less than the number of truechimers, a majority has been found
	 * and the midpoint of each truechimer represents
	 * the candidates available to the cluster algorithm.
	 */
	num_falsetickers = 0;
		unsigned num_midpoints = 0;

		for (i = 0; i < num_points; i++) {
			/* if (point[i].type == -1) c++;
			 * if (point[i].type == 1) c--;
			 * and it's simpler to do it this way:
			 */
			if (c >= num_candidates - num_falsetickers) {
				/* If it was c++ and it got big enough... */
				low = point[i].edge;
			if (point[i].type == 0)
		for (i = num_points-1; i >= 0; i--) {
			if (c >= num_candidates - num_falsetickers) {
				high = point[i].edge;
			if (point[i].type == 0)
		/* If the number of midpoints is greater than the number
		 * of allowed falsetickers, the intersection contains at
		 * least one truechimer with no midpoint - bad.
		 * Also, interval should be nonempty.
		 */
		if (num_midpoints <= num_falsetickers && low < high)
		if (num_falsetickers * 2 >= num_candidates) {
			VERB3 bb_error_msg("falsetickers:%d, candidates:%d%s",
				num_falsetickers, num_candidates,
				", no peer selected");
	VERB4 bb_error_msg("selected interval: [%f, %f]; candidates:%d falsetickers:%d",
			low, high, num_candidates, num_falsetickers);
	/* Construct a list of survivors (p, metric)
	 * from the chime list, where metric is dominated
	 * first by stratum and then by root distance.
	 * All other things being equal, this is the order of preference.
	 */
	for (i = 0; i < num_points; i++) {
		if (point[i].edge < low || point[i].edge > high)
		survivor[num_survivors].p = p;
		/* x.opt_rd == root_distance(p); */
		survivor[num_survivors].metric = MAXDIST * p->lastpkt_stratum + point[i].opt_rd;
		VERB5 bb_error_msg("survivor[%d] metric:%f peer:%s",
			num_survivors, survivor[num_survivors].metric, p->p_dotted);

	/* There must be at least MIN_SELECTED survivors to satisfy the
	 * correctness assertions. Ordinarily, the Byzantine criteria
	 * require four survivors, but for the demonstration here, one
	 * is acceptable.
	 */
	if (num_survivors < MIN_SELECTED) {
		VERB3 bb_error_msg("survivors:%d%s",
			", no peer selected");

//looks like this is ONLY used by the fact that later we pick survivor[0].
//we can avoid sorting then, just find the minimum once!
	qsort(survivor, num_survivors, sizeof(survivor[0]), compare_survivor_metric);
	/* For each association p in turn, calculate the selection
	 * jitter p->sjitter as the square root of the sum of squares
	 * (p->offset - q->offset) over all q associations. The idea is
	 * to repeatedly discard the survivor with maximum selection
	 * jitter until a termination condition is met.
	 */
		unsigned max_idx = max_idx;
		double max_selection_jitter = max_selection_jitter;
		double min_jitter = min_jitter;

		if (num_survivors <= MIN_CLUSTERED) {
			VERB4 bb_error_msg("num_survivors %d <= %d, not discarding more",
					num_survivors, MIN_CLUSTERED);
			/* To make sure a few survivors are left
			 * for the clustering algorithm to chew on,
			 * we stop if the number of survivors
			 * is less than or equal to MIN_CLUSTERED (3).
			 */
		for (i = 0; i < num_survivors; i++) {
			double selection_jitter_sq;

			if (i == 0 || p->filter_jitter < min_jitter)
				min_jitter = p->filter_jitter;

			selection_jitter_sq = 0;
			for (j = 0; j < num_survivors; j++) {
				peer_t *q = survivor[j].p;
				selection_jitter_sq += SQUARE(p->filter_offset - q->filter_offset);
			if (i == 0 || selection_jitter_sq > max_selection_jitter) {
				max_selection_jitter = selection_jitter_sq;
			VERB6 bb_error_msg("survivor %d selection_jitter^2:%f",
					i, selection_jitter_sq);
		max_selection_jitter = SQRT(max_selection_jitter / num_survivors);
		VERB5 bb_error_msg("max_selection_jitter (at %d):%f min_jitter:%f",
				max_idx, max_selection_jitter, min_jitter);

		/* If the maximum selection jitter is less than the
		 * minimum peer jitter, then tossing out more survivors
		 * will not lower the minimum peer jitter, so we might
		 * as well stop.
		 */
		if (max_selection_jitter < min_jitter) {
			VERB4 bb_error_msg("max_selection_jitter:%f < min_jitter:%f, num_survivors:%d, not discarding more",
					max_selection_jitter, min_jitter, num_survivors);

		/* Delete survivor[max_idx] from the list
		 * and go around again.
		 */
		VERB6 bb_error_msg("dropping survivor %d", max_idx);
		while (max_idx < num_survivors) {
			survivor[max_idx] = survivor[max_idx + 1];
	/* Combine the offsets of the clustering algorithm survivors
	 * using a weighted average with weight determined by the root
	 * distance. Compute the selection jitter as the weighted RMS
	 * difference between the first survivor and the remaining
	 * survivors. In some cases the inherent clock jitter can be
	 * reduced by not using this algorithm, especially when frequent
	 * clockhopping is involved. bbox: thus we don't do it.
	 */
	for (i = 0; i < num_survivors; i++) {
		x = root_distance(p);
		z += p->filter_offset / x;
		w += SQUARE(p->filter_offset - survivor[0].p->filter_offset) / x;
	//G.cluster_offset = z / y;
	//G.cluster_jitter = SQRT(w / y);

	/* Pick the best clock. If the old system peer is on the list
	 * and at the same stratum as the first survivor on the list,
	 * then don't do a clock hop. Otherwise, select the first
	 * survivor on the list as the new system peer.
	 */
	if (G.last_update_peer
	 && G.last_update_peer->lastpkt_stratum <= p->lastpkt_stratum
		/* Starting from 1 is ok here */
		for (i = 1; i < num_survivors; i++) {
			if (G.last_update_peer == survivor[i].p) {
				VERB5 bb_error_msg("keeping old synced peer");
				p = G.last_update_peer;
	G.last_update_peer = p;
	VERB4 bb_error_msg("selected peer %s filter_offset:%+f age:%f",
			G.cur_time - p->lastpkt_recv_time

/*
 * Local clock discipline and its helpers
 */
set_new_values(int disc_state, double offset, double recv_time)
	/* Enter new state and set state variables. Note we use the time
	 * of the last clock filter sample, which must be earlier than
	 * the current time.
	 */
	VERB4 bb_error_msg("disc_state=%d last update offset=%f recv_time=%f",
			disc_state, offset, recv_time);
	G.discipline_state = disc_state;
	G.last_update_offset = offset;
	G.last_update_recv_time = recv_time;

/* Return: -1: decrease poll interval, 0: leave as is, 1: increase */
update_local_clock(peer_t *p)
	/* Note: can use G.cluster_offset instead: */
	double offset = p->filter_offset;
	double recv_time = p->lastpkt_recv_time;
#if !USING_KERNEL_PLL_LOOP
#if !USING_KERNEL_PLL_LOOP || USING_INITIAL_FREQ_ESTIMATION
	double since_last_update;
	double etemp, dtemp;

	abs_offset = fabs(offset);

	/* If needed, -S script can do it by looking at $offset
	 * env var and killing parent */
	/* If the offset is too large, give up and go home */
	if (abs_offset > PANIC_THRESHOLD) {
		bb_error_msg_and_die("offset %f far too big, exiting", offset);

	/* If this is an old update, for instance as the result
	 * of a system peer change, avoid it. We never use
	 * an old sample or the same sample twice.
	 */
	if (recv_time <= G.last_update_recv_time) {
		VERB3 bb_error_msg("update from %s: same or older datapoint, not using it",
		return 0; /* "leave poll interval as is" */

	/* Clock state machine transition function. This is where the
	 * action is and defines how the system reacts to large time
	 * and frequency errors.
	 */
#if !USING_KERNEL_PLL_LOOP || USING_INITIAL_FREQ_ESTIMATION
	since_last_update = recv_time - G.reftime;
#if !USING_KERNEL_PLL_LOOP
#if USING_INITIAL_FREQ_ESTIMATION
	if (G.discipline_state == STATE_FREQ) {
		/* Ignore updates until the stepout threshold */
		if (since_last_update < WATCH_THRESHOLD) {
			VERB4 bb_error_msg("measuring drift, datapoint ignored, %f sec remains",
					WATCH_THRESHOLD - since_last_update);
			return 0; /* "leave poll interval as is" */
# if !USING_KERNEL_PLL_LOOP
		freq_drift = (offset - G.last_update_offset) / since_last_update;

	/* There are two main regimes: when the
	 * offset exceeds the step threshold and when it does not.
	 */
	if (abs_offset > STEP_THRESHOLD) {
// This "spike state" seems to be useless, peer selection already drops
// occasional "bad" datapoints. If we are here, there were _many_
// large offsets. When a few first large offsets are seen,
// we end up in "no valid datapoints, no peer selected" state.
// Only when enough of them are seen (which means it's not a fluke),
// we end up here. Looks like _our_ clock is off.
		switch (G.discipline_state) {
			/* The first outlyer: ignore it, switch to SPIK state */
			VERB3 bb_error_msg("update from %s: offset:%+f, spike%s",
				p->p_dotted, offset,
			G.discipline_state = STATE_SPIK;
			return -1; /* "decrease poll interval" */

			/* Ignore succeeding outlyers until either an inlyer
			 * is found or the stepout threshold is exceeded.
			 */
			remains = WATCH_THRESHOLD - since_last_update;
				VERB3 bb_error_msg("update from %s: offset:%+f, spike%s",
					p->p_dotted, offset,
					", datapoint ignored");
				return -1; /* "decrease poll interval" */
			/* fall through: we need to step */

		/* Step the time and clamp down the poll interval.
		 *
		 * In NSET state an initial frequency correction is
		 * not available, usually because the frequency file has
		 * not yet been written. Since the time is outside the
		 * capture range, the clock is stepped. The frequency
		 * will be set directly following the stepout interval.
		 *
		 * In FSET state the initial frequency has been set
		 * from the frequency file. Since the time is outside
		 * the capture range, the clock is stepped immediately,
		 * rather than after the stepout interval. Guys get
		 * nervous if it takes 17 minutes to set the clock for
		 * the first time.
		 *
		 * In SPIK state the stepout threshold has expired and
		 * the phase is still above the step threshold. Note
		 * that a single spike greater than the step threshold
		 * is always suppressed, even at the longer poll
		 * intervals.
		 */
		VERB4 bb_error_msg("stepping time by %+f; poll_exp=MINPOLL", offset);

		if (option_mask32 & OPT_q) {
			/* We were only asked to set time once. Done. */
		clamp_pollexp_and_set_MAXSTRAT();

		run_script("step", offset);

		recv_time += offset;

#if USING_INITIAL_FREQ_ESTIMATION
		if (G.discipline_state == STATE_NSET) {
			set_new_values(STATE_FREQ, /*offset:*/ 0, recv_time);
			return 1; /* "ok to increase poll interval" */
		abs_offset = offset = 0;
		set_new_values(STATE_SYNC, offset, recv_time);
	} else { /* abs_offset <= STEP_THRESHOLD */

		/* The ratio is calculated before jitter is updated to make
		 * poll adjust code more sensitive to large offsets.
		 */
		G.offset_to_jitter_ratio = abs_offset / G.discipline_jitter;

		/* Compute the clock jitter as the RMS of exponentially
		 * weighted offset differences. Used by the poll adjust code.
		 */
		etemp = SQUARE(G.discipline_jitter);
		dtemp = SQUARE(offset - G.last_update_offset);
		G.discipline_jitter = SQRT(etemp + (dtemp - etemp) / AVG);
		if (G.discipline_jitter < G_precision_sec)
			G.discipline_jitter = G_precision_sec;

		switch (G.discipline_state) {
			if (option_mask32 & OPT_q) {
				/* We were only asked to set time once.
				 * The clock is precise enough, no need to step.
				 */
#if USING_INITIAL_FREQ_ESTIMATION
			/* This is the first update received and the frequency
			 * has not been initialized. The first thing to do
			 * is directly measure the oscillator frequency.
			 */
			set_new_values(STATE_FREQ, offset, recv_time);
			set_new_values(STATE_SYNC, offset, recv_time);
			VERB4 bb_error_msg("transitioning to FREQ, datapoint ignored");
			return 0; /* "leave poll interval as is" */

#if 0 /* this is dead code for now */
			/* This is the first update and the frequency
			 * has been initialized. Adjust the phase, but
			 * don't adjust the frequency until the next update.
			 */
			set_new_values(STATE_SYNC, offset, recv_time);
			/* freq_drift remains 0 */

#if USING_INITIAL_FREQ_ESTIMATION
			/* since_last_update >= WATCH_THRESHOLD, we waited enough.
			 * Correct the phase and frequency and switch to SYNC state.
			 * freq_drift was already estimated (see code above)
			 */
			set_new_values(STATE_SYNC, offset, recv_time);

#if !USING_KERNEL_PLL_LOOP
			/* Compute freq_drift due to PLL and FLL contributions.
			 *
			 * The FLL and PLL frequency gain constants
			 * depend on the poll interval and Allan
			 * intercept. The FLL is not used below one-half
			 * the Allan intercept. Above that the loop gain
			 * increases in steps to 1 / AVG.
			 */
			if ((1 << G.poll_exp) > ALLAN / 2) {
				etemp = FLL - G.poll_exp;
				freq_drift += (offset - G.last_update_offset) / (MAXD(since_last_update, ALLAN) * etemp);
			/* For the PLL the integration interval
			 * (numerator) is the minimum of the update
			 * interval and poll interval. This allows
			 * oversampling, but not undersampling.
			 */
			etemp = MIND(since_last_update, (1 << G.poll_exp));
			dtemp = (4 * PLL) << G.poll_exp;
			freq_drift += offset * etemp / SQUARE(dtemp);
			set_new_values(STATE_SYNC, offset, recv_time);

	if (G.stratum != p->lastpkt_stratum + 1) {
		G.stratum = p->lastpkt_stratum + 1;
		run_script("stratum", offset);

	G.reftime = G.cur_time;
	G.ntp_status = p->lastpkt_status;
	G.refid = p->lastpkt_refid;
	G.rootdelay = p->lastpkt_rootdelay + p->lastpkt_delay;
	dtemp = p->filter_jitter; // SQRT(SQUARE(p->filter_jitter) + SQUARE(G.cluster_jitter));
	dtemp += MAXD(p->filter_dispersion + FREQ_TOLERANCE * (G.cur_time - p->lastpkt_recv_time) + abs_offset, MINDISP);
	G.rootdisp = p->lastpkt_rootdisp + dtemp;
	VERB4 bb_error_msg("updating leap/refid/reftime/rootdisp from peer %s", p->p_dotted);

	/* We are in STATE_SYNC now, but did not do adjtimex yet.
	 * (Any other state does not reach this, they all return earlier)
	 * By this time, freq_drift and offset are set
	 * to values suitable for adjtimex.
	 */
#if !USING_KERNEL_PLL_LOOP
	/* Calculate the new frequency drift and frequency stability (wander).
	 * Compute the clock wander as the RMS of exponentially weighted
	 * frequency differences. This is not used directly, but can,
	 * along with the jitter, be a highly useful monitoring and
	 * debugging tool.
	 */
	dtemp = G.discipline_freq_drift + freq_drift;
	G.discipline_freq_drift = MAXD(MIND(MAXDRIFT, dtemp), -MAXDRIFT);
	etemp = SQUARE(G.discipline_wander);
	dtemp = SQUARE(dtemp);
	G.discipline_wander = SQRT(etemp + (dtemp - etemp) / AVG);

	VERB4 bb_error_msg("discipline freq_drift=%.9f(int:%ld corr:%e) wander=%f",
			G.discipline_freq_drift,
			(long)(G.discipline_freq_drift * 65536e6),
			G.discipline_wander);

	memset(&tmx, 0, sizeof(tmx));
	if (adjtimex(&tmx) < 0)
		bb_perror_msg_and_die("adjtimex");
	bb_error_msg("p adjtimex freq:%ld offset:%+ld status:0x%x tc:%ld",
			tmx.freq, tmx.offset, tmx.status, tmx.constant);

	memset(&tmx, 0, sizeof(tmx));
//doesn't work, offset remains 0 (!) in kernel:
//ntpd: set adjtimex freq:1786097 tmx.offset:77487
//ntpd: prev adjtimex freq:1786097 tmx.offset:0
//ntpd: cur adjtimex freq:1786097 tmx.offset:0
	tmx.modes = ADJ_FREQUENCY | ADJ_OFFSET;
	/* 65536 is one ppm */
	tmx.freq = G.discipline_freq_drift * 65536e6;

	tmx.modes = ADJ_OFFSET | ADJ_STATUS | ADJ_TIMECONST;// | ADJ_MAXERROR | ADJ_ESTERROR;
	tmx.constant = (int)G.poll_exp - 4;
	/* The below if statement should be unnecessary, but...
	 * It looks like Linux kernel's PLL is far too gentle in changing
	 * tmx.freq in response to clock offset. Offset keeps growing
	 * and eventually we fall back to smaller poll intervals.
	 * We can make correction more aggressive (about x2) by supplying
	 * PLL time constant which is one less than the real one.
	 * To be on a safe side, let's do it only if offset is significantly
	 * larger than jitter.
	 */
	if (G.offset_to_jitter_ratio >= TIMECONST_HACK_GATE)

	tmx.offset = (long)(offset * 1000000); /* usec */
	if (SLEW_THRESHOLD < STEP_THRESHOLD) {
		if (tmx.offset > (long)(SLEW_THRESHOLD * 1000000)) {
			tmx.offset = (long)(SLEW_THRESHOLD * 1000000);
		if (tmx.offset < -(long)(SLEW_THRESHOLD * 1000000)) {
			tmx.offset = -(long)(SLEW_THRESHOLD * 1000000);

	if (tmx.constant < 0)

	tmx.status = STA_PLL;
	if (G.ntp_status & LI_PLUSSEC)
		tmx.status |= STA_INS;
	if (G.ntp_status & LI_MINUSSEC)
		tmx.status |= STA_DEL;

	//tmx.esterror = (uint32_t)(clock_jitter * 1e6);
	//tmx.maxerror = (uint32_t)((sys_rootdelay / 2 + sys_rootdisp) * 1e6);
	rc = adjtimex(&tmx);
		bb_perror_msg_and_die("adjtimex");
	/* NB: here kernel returns constant == G.poll_exp, not == G.poll_exp - 4.
	 * Not sure why. Perhaps it is normal.
	 */
	VERB4 bb_error_msg("adjtimex:%d freq:%ld offset:%+ld status:0x%x",
			rc, tmx.freq, tmx.offset, tmx.status);
	G.kernel_freq_drift = tmx.freq / 65536;
	VERB2 bb_error_msg("update from:%s offset:%+f delay:%f jitter:%f clock drift:%+.3fppm tc:%d",
			G.discipline_jitter,
			(double)tmx.freq / 65536,

	return 1; /* "ok to increase poll interval" */
/*
 * We've got a new reply packet from a peer, process it
 */
poll_interval(int upper_bound)
	unsigned interval, r, mask;
	interval = 1 << G.poll_exp;
	if (interval > upper_bound)
		interval = upper_bound;
	mask = ((interval-1) >> 4) | 1;
	interval += r & mask; /* ~ random(0..1) * interval/16 */
	VERB4 bb_error_msg("chose poll interval:%u (poll_exp:%d)", interval, G.poll_exp);
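/* For illustration: at poll_exp = 10 the base interval is 1024 sec and
 * mask = ((1024-1) >> 4) | 1 = 63, so up to ~63 extra seconds (roughly
 * interval/16) of random smear are added to spread out queries.
 */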
adjust_poll(int count)
	G.polladj_count += count;
	if (G.polladj_count > POLLADJ_LIMIT) {
		G.polladj_count = 0;
		if (G.poll_exp < MAXPOLL) {
			VERB4 bb_error_msg("polladj: discipline_jitter:%f ++poll_exp=%d",
					G.discipline_jitter, G.poll_exp);
	} else if (G.polladj_count < -POLLADJ_LIMIT || (count < 0 && G.poll_exp > BIGPOLL)) {
		G.polladj_count = 0;
		if (G.poll_exp > MINPOLL) {

			/* Correct p->next_action_time in each peer
			 * which waits for sending, so that they send earlier.
			 * Old pp->next_action_time are on the order
			 * of t + (1 << old_poll_exp) + small_random,
			 * we simply need to subtract ~half of that.
			 */
			for (item = G.ntp_peers; item != NULL; item = item->link) {
				peer_t *pp = (peer_t *) item->data;
				pp->next_action_time -= (1 << G.poll_exp);
			VERB4 bb_error_msg("polladj: discipline_jitter:%f --poll_exp=%d",
					G.discipline_jitter, G.poll_exp);
	VERB4 bb_error_msg("polladj: count:%d", G.polladj_count);
static NOINLINE void
recv_and_process_peer_pkt(peer_t *p)
	double T1, T2, T3, T4;
	double prev_delay, delay;
	datapoint_t *datapoint;

	/* We can recvfrom here and check from.IP, but some multihomed
	 * ntp servers reply from their *other IP*.
	 * TODO: maybe we should check at least what we can: from.port == 123?
	 */
	size = recv(p->p_fd, &msg, sizeof(msg), MSG_DONTWAIT);
		if (errno == EAGAIN)
			/* There was no packet after all
			 * (poll() returning POLLIN for a fd
			 * is not an ironclad guarantee that data is there)
			 */
		/*
		 * If you need a different handling for a specific
		 * errno, always explain it in comment.
		 */
		bb_perror_msg_and_die("recv(%s) error", p->p_dotted);

	if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) {
		bb_error_msg("malformed packet received from %s", p->p_dotted);

	if (msg.m_orgtime.int_partl != p->p_xmt_msg.m_xmttime.int_partl
	 || msg.m_orgtime.fractionl != p->p_xmt_msg.m_xmttime.fractionl
		/* Somebody else's packet */

	/* We do not expect any more packets from this peer for now.
	 * Closing the socket informs kernel about it.
	 * We open a new socket when we send a new query.
	 */

	if ((msg.m_status & LI_ALARM) == LI_ALARM
	 || msg.m_stratum == 0
	 || msg.m_stratum > NTP_MAXSTRATUM
		bb_error_msg("reply from %s: peer is unsynced", p->p_dotted);
		/*
		 * Stratum 0 responses may have commands in 32-bit m_refid field:
		 * "DENY", "RSTR" - peer does not like us at all,
		 * "RATE" - peer is overloaded, reduce polling freq.
		 * If poll interval is small, increase it.
		 */
		if (G.poll_exp < BIGPOLL)
			goto increase_interval;
		goto pick_normal_interval;

//	/* Verify valid root distance */
//	if (msg.m_rootdelay / 2 + msg.m_rootdisp >= MAXDISP || p->lastpkt_reftime > msg.m_xmt)
//		return; /* invalid header values */
	/* From RFC 2030 (with a correction to the delay math):
	 *
	 * Timestamp Name          ID   When Generated
	 * ------------------------------------------------------------
	 * Originate Timestamp     T1   time request sent by client
	 * Receive Timestamp       T2   time request received by server
	 * Transmit Timestamp      T3   time reply sent by server
	 * Destination Timestamp   T4   time reply received by client
	 *
	 * The roundtrip delay and local clock offset are defined as
	 *
	 * delay = (T4 - T1) - (T3 - T2); offset = ((T2 - T1) + (T3 - T4)) / 2
	 */
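	/* Worked example with made-up timestamps (not from a real trace):
	 * T1=10.000 T2=10.100 T3=10.105 T4=10.020 gives
	 * delay  = (10.020 - 10.000) - (10.105 - 10.100) = 0.015 sec,
	 * offset = ((10.100 - 10.000) + (10.105 - 10.020)) / 2 = +0.0925 sec,
	 * i.e. our clock is behind the server's by ~92.5 ms.
	 */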
1887 T2
= lfp_to_d(msg
.m_rectime
);
1888 T3
= lfp_to_d(msg
.m_xmttime
);
1891 /* The delay calculation is a special case. In cases where the
1892 * server and client clocks are running at different rates and
1893 * with very fast networks, the delay can appear negative. In
1894 * order to avoid violating the Principle of Least Astonishment,
1895 * the delay is clamped not less than the system precision.
1897 delay
= (T4
- T1
) - (T3
- T2
);
1898 if (delay
< G_precision_sec
)
1899 delay
= G_precision_sec
;
1901 * If this packet's delay is much bigger than the last one,
1902 * it's better to just ignore it than use its much less precise value.
1904 prev_delay
= p
->p_raw_delay
;
1905 p
->p_raw_delay
= delay
;
1906 if (p
->reachable_bits
&& delay
> prev_delay
* BAD_DELAY_GROWTH
) {
1907 bb_error_msg("reply from %s: delay %f is too high, ignoring", p
->p_dotted
, delay
);
1908 goto pick_normal_interval
;
	p->lastpkt_delay = delay;
	p->lastpkt_recv_time = T4;
	VERB6 bb_error_msg("%s->lastpkt_recv_time=%f", p->p_dotted, p->lastpkt_recv_time);
	p->lastpkt_status = msg.m_status;
	p->lastpkt_stratum = msg.m_stratum;
	p->lastpkt_rootdelay = sfp_to_d(msg.m_rootdelay);
	p->lastpkt_rootdisp = sfp_to_d(msg.m_rootdisp);
	p->lastpkt_refid = msg.m_refid;

	p->datapoint_idx = p->reachable_bits ? (p->datapoint_idx + 1) % NUM_DATAPOINTS : 0;
	datapoint = &p->filter_datapoint[p->datapoint_idx];
	datapoint->d_recv_time = T4;
	datapoint->d_offset = offset = ((T2 - T1) + (T3 - T4)) / 2;
	datapoint->d_dispersion = LOG2D(msg.m_precision_exp) + G_precision_sec;
	if (!p->reachable_bits) {
		/* 1st datapoint ever - replicate offset in every element */
		for (i = 0; i < NUM_DATAPOINTS; i++) {
			p->filter_datapoint[i].d_offset = offset;

	p->reachable_bits |= 1;
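	/* reachable_bits is the usual NTP 8-bit "reach" history of recent
	 * queries: the low bit is set here when a reply arrives, and older
	 * bits age out as further queries are sent, so 0x00 means the last
	 * eight queries went unanswered (the "panic" case in the main loop)
	 * while 0xff means all of them were answered. This is the value the
	 * "reach" fields in the log messages print.
	 */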
	if ((MAX_VERBOSE && G.verbose) || (option_mask32 & OPT_w)) {
		bb_error_msg("reply from %s: offset:%+f delay:%f status:0x%02x strat:%d refid:0x%08x rootdelay:%f reach:0x%02x",
			p->lastpkt_rootdelay,
			/* not shown: m_ppoll, m_precision_exp, m_rootdisp,
			 * m_reftime, m_orgtime, m_rectime, m_xmttime
			 */
	/* Muck with statistics and update the clock */
	filter_datapoints(p);
	q = select_and_cluster();
	if (!(option_mask32 & OPT_w)) {
		rc = update_local_clock(q);
//Disabled this because there is a case where largish offsets
//are unavoidable: if network round-trip delay is, say, ~0.6s,
//error in offset estimation would be ~delay/2 ~= 0.3s.
//Thus, offsets will be usually in -0.3...0.3s range.
//In this case, this code would keep poll interval small,
//but it won't be helping.
//BIGOFF check below deals with a case of seeing multi-second offsets.
		/* If drift is dangerously large, immediately
		 * drop poll interval one step down.
		 */
		if (fabs(q->filter_offset) >= POLLDOWN_OFFSET) {
			VERB4 bb_error_msg("offset:%+f > POLLDOWN_OFFSET", q->filter_offset);
			adjust_poll(-POLLADJ_LIMIT * 3);

		/* No peer selected.
		 * If poll interval is small, increase it.
		 */
		if (G.poll_exp < BIGPOLL)
			goto increase_interval;

	/* Adjust the poll interval by comparing the current offset
	 * with the clock jitter. If the offset is less than
	 * the clock jitter times a constant, then the averaging interval
	 * is increased, otherwise it is decreased. A bit of hysteresis
	 * helps calm the dance. Works best using burst mode.
	 */
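	/* Worked example (illustrative numbers, assuming the POLLADJ_GATE of 4
	 * defined earlier in this file): with offset 2 ms and jitter 1 ms,
	 * offset/jitter = 2 <= 4, so the poll interval is allowed to grow;
	 * with offset 10 ms and the same jitter the ratio is 10 > 4 and the
	 * interval is shortened instead.
	 */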
	if (rc > 0 && G.offset_to_jitter_ratio <= POLLADJ_GATE) {
		/* was += G.poll_exp but it is a bit
		 * too optimistic for my taste at high poll_exp's */
		adjust_poll(MINPOLL);

		bb_error_msg("want smaller interval: offset/jitter = %u",
			G.offset_to_jitter_ratio);
		adjust_poll(-G.poll_exp * 2);

	/* Decide when to send new query for this peer */
 pick_normal_interval:
	interval = poll_interval(INT_MAX);
	if (fabs(offset) >= BIGOFF && interval > BIGOFF_INTERVAL) {
		/* If we are synced, offsets are less than SLEW_THRESHOLD,
		 * or at the very least not much larger than it.
		 * Now we see a largish one.
		 * Either this peer is feeling bad, or packet got corrupted,
		 * or _our_ clock is wrong now and _all_ peers will show similar
		 * largish offsets too.
		 * I observed this with laptop suspend stopping clock.
		 * In any case, it makes sense to make next request soonish:
		 * cases 1 and 2: get a better datapoint,
		 * case 3: allows to resync faster.
		 */
		interval = BIGOFF_INTERVAL;

	set_next(p, interval);
#if ENABLE_FEATURE_NTPD_SERVER
static NOINLINE void
recv_and_process_client_pkt(void /*int fd*/)
{
	len_and_sockaddr *to;
	struct sockaddr *from;
	uint8_t query_status;
	l_fixedpt_t query_xmttime;

	to = get_sock_lsa(G_listen_fd);
	from = xzalloc(to->len);

	size = recv_from_to(G_listen_fd, &msg, sizeof(msg), MSG_DONTWAIT, from, &to->u.sa, to->len);
	if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) {
		if (errno == EAGAIN)
			goto bail; /* not fatal: nothing to read after all */
		bb_perror_msg_and_die("recv");
		addr = xmalloc_sockaddr2dotted_noport(from);
		bb_error_msg("malformed packet received from %s: size %u", addr, (int)size);

	query_status = msg.m_status;
	query_xmttime = msg.m_xmttime;

	/* Build a reply packet */
	memset(&msg, 0, sizeof(msg));
	msg.m_status = G.stratum < MAXSTRAT ? (G.ntp_status & LI_MASK) : LI_ALARM;
	msg.m_status |= (query_status & VERSION_MASK);
	msg.m_status |= ((query_status & MODE_MASK) == MODE_CLIENT) ?
			MODE_SERVER : MODE_SYM_PAS;
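	/* Example of the resulting status byte (LI in bits 7-6, VN in bits 5-3,
	 * mode in bits 2-0): a typical NTPv4 client query carries 0x23
	 * (LI=0, VN=4, mode=3 "client"). If we are synced, the reply built
	 * above becomes 0x24: LI taken from our own ntp_status, VN echoed
	 * from the query, mode forced to 4 "server".
	 */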
	msg.m_stratum = G.stratum;
	msg.m_ppoll = G.poll_exp;
	msg.m_precision_exp = G_precision_exp;
	/* this time was obtained between poll() and recv() */
	msg.m_rectime = d_to_lfp(G.cur_time);
	msg.m_xmttime = d_to_lfp(gettime1900d()); /* this instant */
	if (G.peer_cnt == 0) {
		/* we have no peers: "stratum 1 server" mode. reftime = our own time */
		G.reftime = G.cur_time;
	msg.m_reftime = d_to_lfp(G.reftime);
	msg.m_orgtime = query_xmttime;
	msg.m_rootdelay = d_to_sfp(G.rootdelay);
	//simple code does not do this, fix simple code!
	msg.m_rootdisp = d_to_sfp(G.rootdisp);
	//version = (query_status & VERSION_MASK); /* ... >> VERSION_SHIFT - done below instead */
	msg.m_refid = G.refid; // (version > (3 << VERSION_SHIFT)) ? G.refid : G.refid3;

	/* We reply from the local address packet was sent to,
	 * this makes to/from look swapped here: */
	do_sendto(G_listen_fd,
		/*from:*/ &to->u.sa, /*to:*/ from, /*addrlen:*/ to->len,
/* Upstream ntpd's options:
 *
 * -4      Force DNS resolution of host names to the IPv4 namespace.
 * -6      Force DNS resolution of host names to the IPv6 namespace.
 * -a      Require cryptographic authentication for broadcast client,
 *         multicast client and symmetric passive associations.
 *         This is the default.
 * -A      Do not require cryptographic authentication for broadcast client,
 *         multicast client and symmetric passive associations.
 *         This is almost never a good idea.
 * -b      Enable the client to synchronize to broadcast servers.
 * -c conffile
 *         Specify the name and path of the configuration file,
 *         default /etc/ntp.conf
 * -d      Specify debugging mode. This option may occur more than once,
 *         with each occurrence indicating greater detail of display.
 * -D level
 *         Specify debugging level directly.
 * -f driftfile
 *         Specify the name and path of the frequency file.
 *         This is the same operation as the "driftfile FILE"
 *         configuration command.
 * -g      Normally, ntpd exits with a message to the system log
 *         if the offset exceeds the panic threshold, which is 1000 s
 *         by default. This option allows the time to be set to any value
 *         without restriction; however, this can happen only once.
 *         If the threshold is exceeded after that, ntpd will exit
 *         with a message to the system log. This option can be used
 *         with the -q and -x options. See the tinker command for other options.
 * -i jaildir
 *         Chroot the server to the directory jaildir. This option also implies
 *         that the server attempts to drop root privileges at startup
 *         (otherwise, chroot gives very little additional security).
 *         You may need to also specify a -u option.
 * -k keyfile
 *         Specify the name and path of the symmetric key file,
 *         default /etc/ntp/keys. This is the same operation
 *         as the "keys FILE" configuration command.
 * -l logfile
 *         Specify the name and path of the log file. The default
 *         is the system log file. This is the same operation as
 *         the "logfile FILE" configuration command.
 * -L      Do not listen to virtual IPs. The default is to listen.
 * -N      To the extent permitted by the operating system,
 *         run the ntpd at the highest priority.
 * -p pidfile
 *         Specify the name and path of the file used to record the ntpd
 *         process ID. This is the same operation as the "pidfile FILE"
 *         configuration command.
 * -P priority
 *         To the extent permitted by the operating system,
 *         run the ntpd at the specified priority.
 * -q      Exit the ntpd just after the first time the clock is set.
 *         This behavior mimics that of the ntpdate program, which is
 *         to be retired. The -g and -x options can be used with this option.
 *         Note: The kernel time discipline is disabled with this option.
 * -r broadcastdelay
 *         Specify the default propagation delay from the broadcast/multicast
 *         server to this client. This is necessary only if the delay
 *         cannot be computed automatically by the protocol.
 * -s statsdir
 *         Specify the directory path for files created by the statistics
 *         facility. This is the same operation as the "statsdir DIR"
 *         configuration command.
 * -t key
 *         Add a key number to the trusted key list. This option can occur
 *         more than once.
 * -u user[:group]
 *         Specify a user, and optionally a group, to switch to.
 * -V variable
 *         Add a system variable listed by default.
 * -x      Normally, the time is slewed if the offset is less than the step
 *         threshold, which is 128 ms by default, and stepped if above
 *         the threshold. This option sets the threshold to 600 s, which is
 *         well within the accuracy window to set the clock manually.
 *         Note: since the slew rate of typical Unix kernels is limited
 *         to 0.5 ms/s, each second of adjustment requires an amortization
 *         interval of 2000 s. Thus, an adjustment as much as 600 s
 *         will take almost 14 days to complete. This option can be used
 *         with the -g and -q options. See the tinker command for other options.
 *         Note: The kernel time discipline is disabled with this option.
 */
/* By doing init in a separate function we decrease stack usage
 * in the main loop.
 */
static NOINLINE void ntp_init(char **argv)
{
		bb_error_msg_and_die(bb_msg_you_must_be_root);

	/* Set some globals */
	G.discipline_jitter = G_precision_sec;
	G.stratum = MAXSTRAT;
	G.poll_exp = BURSTPOLL; /* speeds up initial sync */
	G.last_script_run = G.reftime = G.last_update_recv_time = gettime1900d(); /* sets G.cur_time too */
	opt_complementary = "dd:p::wn" /* -d: counter; -p: list; -w implies -n */
		IF_FEATURE_NTPD_SERVER(":Il"); /* -I implies -l */
	opts = getopt32(argv,
			"wp:S:"IF_FEATURE_NTPD_SERVER("l") /* NOT compat */
			IF_FEATURE_NTPD_SERVER("I:") /* compat */
			"46aAbgL", /* compat, ignored */
			&peers, &G.script_name,
#if ENABLE_FEATURE_NTPD_SERVER
//	if (opts & OPT_x) /* disable stepping, only slew is allowed */
//		G.time_was_stepped = 1;
		add_peers(llist_pop(&peers));

#if ENABLE_FEATURE_NTPD_CONF
		parser = config_open("/etc/ntp.conf");
		while (config_read(parser, token, 3, 1, "# \t", PARSE_NORMAL)) {
			if (strcmp(token[0], "server") == 0 && token[1]) {
				add_peers(token[1]);
			bb_error_msg("skipping %s:%u: unimplemented command '%s'",
				"/etc/ntp.conf", parser->lineno, token[0]
			);
		config_close(parser);
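		/* Note: only "server HOST" lines are handled above; anything else
		 * is reported and skipped. A minimal /etc/ntp.conf for this parser
		 * could therefore look like (host names are examples only):
		 *
		 *   server 0.pool.ntp.org
		 *   server 1.pool.ntp.org
		 *   server time.example.com
		 */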
	if (G.peer_cnt == 0) {
		if (!(opts & OPT_l))
		/* -l but no peers: "stratum 1 server" mode */

#if ENABLE_FEATURE_NTPD_SERVER
		G_listen_fd = create_and_bind_dgram_or_die(NULL, 123);
		if (setsockopt_bindtodevice(G_listen_fd, G.if_name))
		socket_want_pktinfo(G_listen_fd);
		setsockopt_int(G_listen_fd, IPPROTO_IP, IP_TOS, IPTOS_LOWDELAY);

	if (!(opts & OPT_n)) {
		bb_daemonize_or_rexec(DAEMON_DEVNULL_STDIO, argv);
		logmode = LOGMODE_NONE;

	/* I hesitate to set -20 prio. -15 should be high enough for timekeeping */
		setpriority(PRIO_PROCESS, 0, -15);
	/* If network is up, synchronization occurs in ~10 seconds.
	 * We give "ntpd -q" 10 seconds to get first reply,
	 * then another 50 seconds to finish syncing.
	 *
	 * I tested ntpd 4.2.6p1 and apparently it never exits
	 * (will try forever), but it does not feel right.
	 * The goal of -q is to act like ntpdate: set time
	 * after a reasonably small period of polling, or fail.
	 */
		option_mask32 |= OPT_qq;
int ntpd_main(int argc UNUSED_PARAM, char **argv) MAIN_EXTERNALLY_VISIBLE;
int ntpd_main(int argc UNUSED_PARAM, char **argv)
{
	memset(&G, 0, sizeof(G));
	SET_PTR_TO_GLOBALS(&G);

	/* If ENABLE_FEATURE_NTPD_SERVER, + 1 for listen_fd: */
	cnt = G.peer_cnt + ENABLE_FEATURE_NTPD_SERVER;
	idx2peer = xzalloc(sizeof(idx2peer[0]) * cnt);
	pfd = xzalloc(sizeof(pfd[0]) * cnt);

	/* Countdown: we never sync before we sent INITIAL_SAMPLES+1
	 * packets to each peer.
	 * NB: if some peer is not responding, we may end up sending
	 * fewer packets to it and more to other peers.
	 * NB2: sync usually happens using INITIAL_SAMPLES packets,
	 * since last reply does not come back instantaneously.
	 */
	cnt = G.peer_cnt * (INITIAL_SAMPLES + 1);

	write_pidfile(CONFIG_PID_FILE_PATH "/ntpd.pid");
	while (!bb_got_signal) {
		/* Nothing between here and poll() blocks for any significant time */

		nextaction = G.cur_time + 3600;

#if ENABLE_FEATURE_NTPD_SERVER
		if (G_listen_fd != -1) {
			pfd[0].fd = G_listen_fd;
			pfd[0].events = POLLIN;

		/* Pass over peer list, send requests, time out on receives */
		for (item = G.ntp_peers; item != NULL; item = item->link) {
			peer_t *p = (peer_t *) item->data;

			if (p->next_action_time <= G.cur_time) {
				if (p->p_fd == -1) {
					/* Time to send new req */
						VERB4 bb_error_msg("disabling burst mode");
						G.polladj_count = 0;
						G.poll_exp = MINPOLL;
					send_query_to_peer(p);

					/* Timed out waiting for reply */
					/* If poll interval is small, increase it */
					if (G.poll_exp < BIGPOLL)
						adjust_poll(MINPOLL);
					timeout = poll_interval(NOREPLY_INTERVAL);
					bb_error_msg("timed out waiting for %s, reach 0x%02x, next query in %us",
							p->p_dotted, p->reachable_bits, timeout);
					/* What if we don't see it because it changed its IP? */
					if (p->reachable_bits == 0)
						resolve_peer_hostname(p, /*loop_on_fail=*/ 0);
					set_next(p, timeout);

			if (p->next_action_time < nextaction)
				nextaction = p->next_action_time;

			/* Wait for reply from this peer */
			pfd[i].fd = p->p_fd;
			pfd[i].events = POLLIN;

		timeout = nextaction - G.cur_time;
		timeout++; /* (nextaction - G.cur_time) rounds down, compensating */
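		/* Example: if nextaction - G.cur_time is 2.7 s, the integer
		 * assignment truncates it to 2; the ++ bumps it to 3 so that
		 * poll() below cannot return before nextaction has passed.
		 */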
		/* Here we may block */
		if (i > (ENABLE_FEATURE_NTPD_SERVER && G_listen_fd != -1)) {
			/* We wait for at least one reply.
			 * Poll for it, without wasting time for message.
			 * Since replies often come under 1 second, this also
			 * reduces clutter in logs.
			 */
			nfds = poll(pfd, i, 1000);

		bb_error_msg("poll:%us sockets:%u interval:%us", timeout, i, 1 << G.poll_exp);
		nfds = poll(pfd, i, timeout * 1000);
		gettime1900d(); /* sets G.cur_time */
		if (!bb_got_signal /* poll wasn't interrupted by a signal */
		 && G.cur_time - G.last_script_run > 11*60
			/* Useful for updating battery-backed RTC and such */
			run_script("periodic", G.last_update_offset);
			gettime1900d(); /* sets G.cur_time */
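			/* A minimal -S PROG hook for the "periodic" event could be the
			 * following shell sketch (illustrative only; it assumes the
			 * event name arrives as $1 and that busybox hwclock is
			 * available on the system):
			 *
			 *   #!/bin/sh
			 *   # save the freshly disciplined system time to the RTC
			 *   [ "$1" = periodic ] && hwclock -w
			 */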
		/* Process any received packets */
#if ENABLE_FEATURE_NTPD_SERVER
		if (G.listen_fd != -1) {
			if (pfd[0].revents /* & (POLLIN|POLLERR)*/) {
				recv_and_process_client_pkt(/*G.listen_fd*/);
				gettime1900d(); /* sets G.cur_time */

		for (; nfds != 0 && j < i; j++) {
			if (pfd[j].revents /* & (POLLIN|POLLERR)*/) {
				/* At init, alarm was set to 10 sec.
				 * Now we did get a reply.
				 * Increase timeout to 50 seconds to finish syncing.
				 */
				if (option_mask32 & OPT_qq) {
					option_mask32 &= ~OPT_qq;
				recv_and_process_peer_pkt(idx2peer[j]);
				gettime1900d(); /* sets G.cur_time */

		if (G.ntp_peers && G.stratum != MAXSTRAT) {
			for (item = G.ntp_peers; item != NULL; item = item->link) {
				peer_t *p = (peer_t *) item->data;
				if (p->reachable_bits)
					goto have_reachable_peer;
			/* No peer responded for last 8 packets, panic */
			clamp_pollexp_and_set_MAXSTRAT();
			run_script("unsync", 0.0);
 have_reachable_peer: ;
	} /* while (!bb_got_signal) */

	remove_pidfile(CONFIG_PID_FILE_PATH "/ntpd.pid");
	kill_myself_with_sig(bb_got_signal);
/*** openntpd-4.6 uses only adjtime, not adjtimex ***/

/*** ntp-4.2.6/ntpd/ntp_loopfilter.c - adjtimex usage ***/

direct_freq(double fp_offset)
{
	/*
	 * If the kernel is enabled, we need the residual offset to
	 * calculate the frequency correction.
	 */
	if (pll_control && kern_enable) {
		memset(&ntv, 0, sizeof(ntv));
		clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
		clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
		drift_comp = FREQTOD(ntv.freq);
#endif /* KERNEL_PLL */
	set_freq((fp_offset - clock_offset) / (current_time - clock_epoch) + drift_comp);
set_freq(double freq) /* frequency update */
{
	/*
	 * If the kernel is enabled, update the kernel frequency.
	 */
	if (pll_control && kern_enable) {
		memset(&ntv, 0, sizeof(ntv));
		ntv.modes = MOD_FREQUENCY;
		ntv.freq = DTOFREQ(drift_comp);
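		/* In ntp-4.2.6, DTOFREQ()/FREQTOD() convert between a plain double
		 * frequency offset (s/s) and the kernel's 16.16 fixed-point
		 * "scaled PPM", i.e. they multiply/divide by 65536e6.
		 * Worked example with an illustrative drift of 12.5 PPM:
		 * DTOFREQ(12.5e-6) ~= 12.5e-6 * 65536e6 = 819200.
		 */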
		snprintf(tbuf, sizeof(tbuf), "kernel %.3f PPM", drift_comp * 1e6);
		report_event(EVNT_FSET, NULL, tbuf);
		snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp * 1e6);
		report_event(EVNT_FSET, NULL, tbuf);
#else /* KERNEL_PLL */
	snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp * 1e6);
	report_event(EVNT_FSET, NULL, tbuf);
#endif /* KERNEL_PLL */
/*
 * This code segment works when clock adjustments are made using
 * precision time kernel support and the ntp_adjtime() system
 * call. This support is available in Solaris 2.6 and later,
 * Digital Unix 4.0 and later, FreeBSD, Linux and specially
 * modified kernels for HP-UX 9 and Ultrix 4. In the case of the
 * DECstation 5000/240 and Alpha AXP, additional kernel
 * modifications provide a true microsecond clock and nanosecond
 * clock, respectively.
 *
 * Important note: The kernel discipline is used only if the
 * step threshold is less than 0.5 s, as anything higher can
 * lead to overflow problems. This might occur if some misguided
 * lad set the step threshold to something ridiculous.
 */
	if (pll_control && kern_enable) {
#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | MOD_STATUS | MOD_TIMECONST)
		/*
		 * We initialize the structure for the ntp_adjtime()
		 * system call. We have to convert everything to
		 * microseconds or nanoseconds first. Do not update the
		 * system variables if the ext_enable flag is set. In
		 * this case, the external clock driver will update the
		 * variables, which will be read later by the local
		 * clock driver. Afterwards, remember the time and
		 * frequency offsets for jitter and stability values and
		 * to update the frequency file.
		 */
		memset(&ntv, 0, sizeof(ntv));
			ntv.modes = MOD_STATUS;
			ntv.modes = MOD_BITS | MOD_NANO;
#else /* STA_NANO */
			ntv.modes = MOD_BITS;
#endif /* STA_NANO */
			if (clock_offset < 0)
			ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
			ntv.constant = sys_poll;
#else /* STA_NANO */
			ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
			ntv.constant = sys_poll - 4;
#endif /* STA_NANO */
			ntv.esterror = (u_int32)(clock_jitter * 1e6);
			ntv.maxerror = (u_int32)((sys_rootdelay / 2 + sys_rootdisp) * 1e6);
			ntv.status = STA_PLL;
		/*
		 * Enable/disable the PPS if requested.
		 */
			if (!(pll_status & STA_PPSTIME))
				report_event(EVNT_KERN,
					NULL, "PPS enabled");
			ntv.status |= STA_PPSTIME | STA_PPSFREQ;
			if (pll_status & STA_PPSTIME)
				report_event(EVNT_KERN,
					NULL, "PPS disabled");
			ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
		if (sys_leap == LEAP_ADDSECOND)
			ntv.status |= STA_INS;
		else if (sys_leap == LEAP_DELSECOND)
			ntv.status |= STA_DEL;
		/*
		 * Pass the stuff to the kernel. If it squeals, turn off
		 * the pps. In any case, fetch the kernel offset,
		 * frequency and jitter.
		 */
		if (ntp_adjtime(&ntv) == TIME_ERROR) {
			if (!(ntv.status & STA_PPSSIGNAL))
				report_event(EVNT_KERN, NULL,
		pll_status = ntv.status;
		clock_offset = ntv.offset / 1e9;
#else /* STA_NANO */
		clock_offset = ntv.offset / 1e6;
#endif /* STA_NANO */
		clock_frequency = FREQTOD(ntv.freq);
		/*
		 * If the kernel PPS is lit, monitor its performance.
		 */
		if (ntv.status & STA_PPSTIME) {
			clock_jitter = ntv.jitter / 1e9;
#else /* STA_NANO */
			clock_jitter = ntv.jitter / 1e6;
#endif /* STA_NANO */

#if defined(STA_NANO) && NTP_API == 4
		/*
		 * If the TAI changes, update the kernel TAI.
		 */
		if (loop_tai != sys_tai) {
			ntv.modes = MOD_TAI;
			ntv.constant = sys_tai;
#endif /* STA_NANO */
#endif /* KERNEL_PLL */