[linux-2.6/x86.git] / kernel/time/timecompare.c
blob ac38fbb176ccd0bb598b1eaaa7f2a703b17ec565
/*
 * Copyright (C) 2009 Intel Corporation.
 * Author: Patrick Ohly <patrick.ohly@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/timecompare.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/math64.h>
/*
 * fixed point arithmetic scale factor for skew
 *
 * Usually one would measure skew in ppb (parts per billion, 1e9), but
 * using a factor of 2 simplifies the math.
 */
#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
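
/*
 * Added note: skew stores d(offset)/d(source time) scaled by 2^30, as
 * computed in __timecompare_update() below.  For example, a clock that
 * drifts by 100 ppm relative to the target corresponds to |skew| of
 * roughly 1e-4 * TIMECOMPARE_SKEW_RESOLUTION ~= 107374.
 */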
ktime_t timecompare_transform(struct timecompare *sync,
			      u64 source_tstamp)
{
	u64 nsec;

	nsec = source_tstamp + sync->offset;
	nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
		TIMECOMPARE_SKEW_RESOLUTION;

	return ns_to_ktime(nsec);
}
EXPORT_SYMBOL_GPL(timecompare_transform);
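
/*
 * Usage sketch (added illustration, not part of the original file):
 * a driver whose ->source timecounter wraps its hardware clock and
 * whose ->target() reads system time could translate a raw hardware
 * timestamp into target time like this:
 *
 *	u64 hw_ns = timecounter_cyc2time(sync->source, hw_cycles);
 *	ktime_t sys_time = timecompare_transform(sync, hw_ns);
 *
 * "sync" and "hw_cycles" are hypothetical names; hw_cycles stands for a
 * raw counter value read from the device.
 */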
int timecompare_offset(struct timecompare *sync,
		       s64 *offset,
		       u64 *source_tstamp)
{
	u64 start_source = 0, end_source = 0;
	struct {
		s64 offset;
		s64 duration_target;
	} buffer[10], sample, *samples;
	int counter = 0, i;
	int used;
	int index;
	int num_samples = sync->num_samples;

	if (num_samples > sizeof(buffer)/sizeof(buffer[0])) {
		samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
		if (!samples) {
			samples = buffer;
			num_samples = sizeof(buffer)/sizeof(buffer[0]);
		}
	} else {
		samples = buffer;
	}
	/* run until we have enough valid samples, but do not try forever */
	i = 0;
	counter = 0;
	while (1) {
		u64 ts;
		ktime_t start, end;

		start = sync->target();
		ts = timecounter_read(sync->source);
		end = sync->target();

		if (!i)
			start_source = ts;

		/* ignore negative durations */
		sample.duration_target = ktime_to_ns(ktime_sub(end, start));
		if (sample.duration_target >= 0) {
			/*
			 * assume symmetric delay to and from source:
			 * average target time corresponds to measured
			 * source time
			 */
			sample.offset =
				(ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
				ts;

			/* simple insertion sort based on duration */
			index = counter - 1;
			while (index >= 0) {
				if (samples[index].duration_target <
				    sample.duration_target)
					break;
				samples[index + 1] = samples[index];
				index--;
			}
			samples[index + 1] = sample;
			counter++;
		}

		i++;
		if (counter >= num_samples || i >= 100000) {
			end_source = ts;
			break;
		}
	}
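
	/*
	 * Report the midpoint of the first and last source readings as
	 * the source timestamp of this measurement run.
	 */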
	*source_tstamp = (end_source + start_source) / 2;

	/* remove outliers by only using 75% of the samples */
	used = counter * 3 / 4;
	if (!used)
		used = counter;
	if (used) {
		/* calculate average */
		s64 off = 0;
		for (index = 0; index < used; index++)
			off += samples[index].offset;
		*offset = div_s64(off, used);
	}

	if (samples && samples != buffer)
		kfree(samples);

	return used;
}
EXPORT_SYMBOL_GPL(timecompare_offset);
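
/*
 * Re-measure offset and skew against the target clock.  If
 * timecompare_offset() finds no usable samples (returns zero), the
 * previously stored state is left untouched.
 */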
void __timecompare_update(struct timecompare *sync,
			  u64 source_tstamp)
{
	s64 offset;
	u64 average_time;

	if (!timecompare_offset(sync, &offset, &average_time))
		return;

	if (!sync->last_update) {
		sync->last_update = average_time;
		sync->offset = offset;
		sync->skew = 0;
	} else {
		s64 delta_nsec = average_time - sync->last_update;

		/* avoid division by negative or small deltas */
		if (delta_nsec >= 10000) {
			s64 delta_offset_nsec = offset - sync->offset;
			s64 skew; /* delta_offset_nsec *
				     TIMECOMPARE_SKEW_RESOLUTION /
				     delta_nsec */
			u64 divisor;

			/* div_s64() is limited to 32 bit divisor */
			skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
			divisor = delta_nsec;
			while (unlikely(divisor >= ((s64)1) << 32)) {
				/* divide both by 2; beware, right shift
				   of a negative value is implementation-
				   defined and can only be used for
				   the positive divisor */
				skew = div_s64(skew, 2);
				divisor >>= 1;
			}
			skew = div_s64(skew, divisor);

			/*
			 * Calculate new overall skew as 4/16 the
			 * old value and 12/16 the new one. This is
			 * a rather arbitrary tradeoff between
			 * only using the latest measurement (0/16 and
			 * 16/16) and even more weight on past measurements.
			 */
#define TIMECOMPARE_NEW_SKEW_PER_16 12
			sync->skew =
				div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
					sync->skew +
					TIMECOMPARE_NEW_SKEW_PER_16 * skew,
					16);
			sync->last_update = average_time;
			sync->offset = offset;
		}
	}
}
EXPORT_SYMBOL_GPL(__timecompare_update);
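
/*
 * Setup sketch (added illustration, not part of the original file): a
 * driver with a hardware timecounter could wire up the comparison like
 * this and call __timecompare_update() periodically from its timestamp
 * processing path.  example_tc, example_target() and hw_ns are
 * hypothetical names; hw_ns is a source timestamp in nanoseconds,
 * obtained as in the sketch following timecompare_transform() above.
 *
 *	static ktime_t example_target(void)
 *	{
 *		return ktime_get_real();
 *	}
 *
 *	static struct timecompare example_sync = {
 *		.source      = &example_tc,
 *		.target      = example_target,
 *		.num_samples = 10,
 *	};
 *
 *	__timecompare_update(&example_sync, hw_ns);
 */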