/*-
 * Copyright (c) 1996 - 2001 Brian Somers <brian@Awfulhak.org>
 *          based on work by Toshiharu OHNO <tony-o@iij.ad.jp>
 *                           Internet Initiative Japan, Inc (IIJ)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/usr.sbin/ppp/lqr.c,v 1.40.2.4 2002/09/01 02:12:28 brian Exp $
 * $DragonFly: src/usr.sbin/ppp/lqr.c,v 1.3 2004/02/03 07:11:47 dillon Exp $
 */

#include <sys/param.h>

#ifdef __DragonFly__
#include <netinet/in.h>
#endif
#include <sys/un.h>

#include <string.h>
#include <termios.h>

#include "layer.h"
#include "mbuf.h"
#include "log.h"
#include "defs.h"
#include "timer.h"
#include "fsm.h"
#include "acf.h"
#include "proto.h"
#include "lqr.h"
#include "hdlc.h"
#include "lcp.h"
#include "async.h"
#include "throughput.h"
#include "ccp.h"
#include "link.h"
#include "descriptor.h"
#include "physical.h"
#include "mp.h"
#include "chat.h"
#include "auth.h"
#include "chap.h"
#include "command.h"
#include "cbcp.h"
#include "datalink.h"

struct echolqr {
  u_int32_t magic;
  u_int32_t signature;
  u_int32_t sequence;
};

#define SIGNATURE  0x594e4f54

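/*
 * Send an LCP ECHO REQUEST whose data carries our magic number, the fixed
 * SIGNATURE word and a monotonically increasing sequence number.
 * lqr_RecvEcho() below expects the same layout when validating the peer's
 * ECHO REPLY.
 */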
static void
SendEchoReq(struct lcp *lcp)
{
  struct hdlc *hdlc = &link2physical(lcp->fsm.link)->hdlc;
  struct echolqr echo;

  echo.magic = htonl(lcp->want_magic);
  echo.signature = htonl(SIGNATURE);
  echo.sequence = htonl(hdlc->lqm.echo.seq_sent);
  fsm_Output(&lcp->fsm, CODE_ECHOREQ, hdlc->lqm.echo.seq_sent++,
             (u_char *)&echo, sizeof echo, MB_ECHOOUT);
}

struct mbuf *
lqr_RecvEcho(struct fsm *fp, struct mbuf *bp)
{
  struct hdlc *hdlc = &link2physical(fp->link)->hdlc;
  struct lcp *lcp = fsm2lcp(fp);
  struct echolqr lqr;

  if (m_length(bp) >= sizeof lqr) {
    m_freem(mbuf_Read(bp, &lqr, sizeof lqr));
    bp = NULL;
    lqr.magic = ntohl(lqr.magic);
    lqr.signature = ntohl(lqr.signature);
    lqr.sequence = ntohl(lqr.sequence);

    /* Tolerate echo replies with either magic number */
    if (lqr.magic != 0 && lqr.magic != lcp->his_magic &&
        lqr.magic != lcp->want_magic) {
      log_Printf(LogWARN, "%s: lqr_RecvEcho: Bad magic: expected 0x%08x,"
                 " got 0x%08x\n", fp->link->name, lcp->his_magic, lqr.magic);
      /*
       * XXX: We should send a terminate request. But poor implementations may
       *      die as a result.
       */
    }
    if (lqr.signature == SIGNATURE) {
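      /*
       * The received sequence number replaces seq_recv only when it is
       * genuinely newer: either seq_recv is in the last few values before
       * the 32-bit counter wraps and the new value has already wrapped to
       * a small number, or we're away from the wrap point and the new
       * value is simply larger than what we've recorded so far.
       */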
      /* careful not to update lqm.echo.seq_recv with older values */
      if ((hdlc->lqm.echo.seq_recv > (u_int32_t)0 - 5 && lqr.sequence < 5) ||
          (hdlc->lqm.echo.seq_recv <= (u_int32_t)0 - 5 &&
           lqr.sequence > hdlc->lqm.echo.seq_recv))
        hdlc->lqm.echo.seq_recv = lqr.sequence;
    } else
      log_Printf(LogWARN, "lqr_RecvEcho: Got sig 0x%08lx, not 0x%08lx !\n",
                 (u_long)lqr.signature, (u_long)SIGNATURE);
  } else
    log_Printf(LogWARN, "lqr_RecvEcho: Got packet size %zu, expecting %zu !\n",
               m_length(bp), sizeof(struct echolqr));
  return bp;
}

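/*
 * Convert every 32-bit field of an LQR between network and host byte
 * order.  ntohl() and htonl() perform the same transformation on a 32-bit
 * word, so this single routine is used in both directions.
 */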
void
lqr_ChangeOrder(struct lqrdata *src, struct lqrdata *dst)
{
  u_int32_t *sp, *dp;
  unsigned n;

  sp = (u_int32_t *) src;
  dp = (u_int32_t *) dst;
  for (n = 0; n < sizeof(struct lqrdata) / sizeof(u_int32_t); n++, sp++, dp++)
    *dp = ntohl(*sp);
}

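/*
 * Queue an (as yet empty) LQR.  Space is reserved in front of the payload
 * for the proto and acf headers, and the counters themselves are filled
 * in later by lqr_LayerPush(), just before the frame is built.  The mbuf
 * goes onto the link's last queue (LINK_QUEUES - 1), which appears to be
 * the highest-priority one, so the report isn't delayed behind queued
 * data.
 */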
static void
SendLqrData(struct lcp *lcp)
{
  struct mbuf *bp;
  int extra;

  extra = proto_WrapperOctets(lcp, PROTO_LQR) +
          acf_WrapperOctets(lcp, PROTO_LQR);
  bp = m_get(sizeof(struct lqrdata) + extra, MB_LQROUT);
  bp->m_len -= extra;
  bp->m_offset += extra;
  link_PushPacket(lcp->fsm.link, bp, lcp->fsm.bundle,
                  LINK_QUEUES(lcp->fsm.link) - 1, PROTO_LQR);
}

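/*
 * Timer handler: decide whether the link still looks healthy and emit the
 * next quality report.  In LQR mode, more than 5 consecutive unanswered
 * LQRs bring the datalink down; in ECHO mode the same happens once our
 * echo sequence number gets more than 5 ahead of the last reply seen.
 * Otherwise the next LQR or ECHO REQUEST is sent and the timer re-armed.
 */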
static void
SendLqrReport(void *v)
{
  struct lcp *lcp = (struct lcp *)v;
  struct physical *p = link2physical(lcp->fsm.link);

  timer_Stop(&p->hdlc.lqm.timer);

  if (p->hdlc.lqm.method & LQM_LQR) {
    if (p->hdlc.lqm.lqr.resent > 5) {
      /* XXX: Should implement LQM strategy */
      log_Printf(LogPHASE, "%s: ** Too many LQR packets lost **\n",
                 lcp->fsm.link->name);
      log_Printf(LogLQM, "%s: Too many LQR packets lost\n",
                 lcp->fsm.link->name);
      p->hdlc.lqm.method = 0;
      datalink_Down(p->dl, CLOSE_NORMAL);
    } else {
      SendLqrData(lcp);
      p->hdlc.lqm.lqr.resent++;
    }
  } else if (p->hdlc.lqm.method & LQM_ECHO) {
    if ((p->hdlc.lqm.echo.seq_sent > 5 &&
         p->hdlc.lqm.echo.seq_sent - 5 > p->hdlc.lqm.echo.seq_recv) ||
        (p->hdlc.lqm.echo.seq_sent <= 5 &&
         p->hdlc.lqm.echo.seq_sent > p->hdlc.lqm.echo.seq_recv + 5)) {
      log_Printf(LogPHASE, "%s: ** Too many ECHO LQR packets lost **\n",
                 lcp->fsm.link->name);
      log_Printf(LogLQM, "%s: Too many ECHO LQR packets lost\n",
                 lcp->fsm.link->name);
      p->hdlc.lqm.method = 0;
      datalink_Down(p->dl, CLOSE_NORMAL);
    } else
      SendEchoReq(lcp);
  }
  if (p->hdlc.lqm.method && p->hdlc.lqm.timer.load)
    timer_Start(&p->hdlc.lqm.timer);
}

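/*
 * Process an incoming LQR protocol packet.  If LQR hasn't been accepted
 * and we're not already running it, the packet is protocol-rejected.
 * Otherwise the peer's counters are byte-swapped into lqr.peer and a
 * reply is generated immediately when we're not running our own LQR
 * timer, when the peer doesn't appear to be seeing our LQRs, or when our
 * next scheduled report would arrive after the peer's timeout.
 */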
struct mbuf *
lqr_Input(struct bundle *bundle __unused, struct link *l, struct mbuf *bp)
{
  struct physical *p = link2physical(l);
  struct lcp *lcp;
  int len;

  if (p == NULL) {
    log_Printf(LogERROR, "lqr_Input: Not a physical link - dropped\n");
    m_freem(bp);
    return NULL;
  }
  lcp = p->hdlc.lqm.owner;

  p->hdlc.lqm.lqr.SaveInLQRs++;

  len = m_length(bp);
  if (len != sizeof(struct lqrdata))
    log_Printf(LogWARN, "lqr_Input: Got packet size %d, expecting %ld !\n",
               len, (long)sizeof(struct lqrdata));
  else if (!IsAccepted(l->lcp.cfg.lqr) && !(p->hdlc.lqm.method & LQM_LQR)) {
    bp = m_pullup(proto_Prepend(bp, PROTO_LQR, 0, 0));
    lcp_SendProtoRej(lcp, MBUF_CTOP(bp), bp->m_len);
  } else {
    struct lqrdata *lqr;
    u_int32_t lastLQR;

    bp = m_pullup(bp);
    lqr = (struct lqrdata *)MBUF_CTOP(bp);
    if (ntohl(lqr->MagicNumber) != lcp->his_magic)
      log_Printf(LogWARN, "lqr_Input: magic 0x%08lx is wrong,"
                 " expecting 0x%08lx\n",
                 (u_long)ntohl(lqr->MagicNumber), (u_long)lcp->his_magic);
    else {
      /*
       * Remember our PeerInLQRs, then convert byte order and save
       */
      lastLQR = p->hdlc.lqm.lqr.peer.PeerInLQRs;

      lqr_ChangeOrder(lqr, &p->hdlc.lqm.lqr.peer);
      lqr_Dump(l->name, "Input", &p->hdlc.lqm.lqr.peer);
      /* we have received an LQR from the peer */
      p->hdlc.lqm.lqr.resent = 0;

      /*
       * Generate an LQR response if we're not running an LQR timer OR
       * two successive LQRs have reported the same PeerInLQRs OR we're
       * not going to send our next one before the peer's max timeout.
       */
      if (p->hdlc.lqm.timer.load == 0 ||
          !(p->hdlc.lqm.method & LQM_LQR) ||
          (lastLQR && lastLQR == p->hdlc.lqm.lqr.peer.PeerInLQRs) ||
          (p->hdlc.lqm.lqr.peer_timeout &&
           p->hdlc.lqm.timer.rest * 100 / SECTICKS >
           (unsigned)p->hdlc.lqm.lqr.peer_timeout))
        SendLqrData(lcp);
    }
  }
  m_freem(bp);
  return NULL;
}

/*
 * When LCP reaches the Opened state, we start LQM activity.
 */
static void
lqr_Setup(struct lcp *lcp)
{
  struct physical *physical = link2physical(lcp->fsm.link);

  physical->hdlc.lqm.lqr.resent = 0;
  physical->hdlc.lqm.echo.seq_sent = 0;
  physical->hdlc.lqm.echo.seq_recv = 0;
  memset(&physical->hdlc.lqm.lqr.peer, '\0',
         sizeof physical->hdlc.lqm.lqr.peer);

  physical->hdlc.lqm.method = LQM_ECHO;
  if (IsEnabled(lcp->cfg.lqr) && !REJECTED(lcp, TY_QUALPROTO))
    physical->hdlc.lqm.method |= LQM_LQR;
  timer_Stop(&physical->hdlc.lqm.timer);

  physical->hdlc.lqm.lqr.peer_timeout = lcp->his_lqrperiod;
  if (lcp->his_lqrperiod)
    log_Printf(LogLQM, "%s: Expecting LQR every %d.%02d secs\n",
               physical->link.name, lcp->his_lqrperiod / 100,
               lcp->his_lqrperiod % 100);

  if (lcp->want_lqrperiod) {
    log_Printf(LogLQM, "%s: Will send %s every %d.%02d secs\n",
               physical->link.name,
               physical->hdlc.lqm.method & LQM_LQR ? "LQR" : "ECHO LQR",
               lcp->want_lqrperiod / 100, lcp->want_lqrperiod % 100);
    physical->hdlc.lqm.timer.load = lcp->want_lqrperiod * SECTICKS / 100;
    physical->hdlc.lqm.timer.func = SendLqrReport;
    physical->hdlc.lqm.timer.name = "lqm";
    physical->hdlc.lqm.timer.arg = lcp;
  } else {
    physical->hdlc.lqm.timer.load = 0;
    if (!lcp->his_lqrperiod)
      log_Printf(LogLQM, "%s: LQR/ECHO LQR not negotiated\n",
                 physical->link.name);
  }
}

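/*
 * lqr_Start() fires an immediate report (which also re-arms the timer);
 * lqr_reStart() only re-arms the timer.  The negotiated lqrperiod values
 * are expressed in hundredths of a second, hence the "%d.%02d" logging
 * and the "* SECTICKS / 100" conversion above.
 */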
void
lqr_Start(struct lcp *lcp)
{
  struct physical *p = link2physical(lcp->fsm.link);

  lqr_Setup(lcp);
  if (p->hdlc.lqm.timer.load)
    SendLqrReport(lcp);
}

void
lqr_reStart(struct lcp *lcp)
{
  struct physical *p = link2physical(lcp->fsm.link);

  lqr_Setup(lcp);
  if (p->hdlc.lqm.timer.load)
    timer_Start(&p->hdlc.lqm.timer);
}

void
lqr_StopTimer(struct physical *physical)
{
  timer_Stop(&physical->hdlc.lqm.timer);
}

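/*
 * Stop one LQM method.  If the other method is still active, an immediate
 * report keeps it ticking; otherwise the LQM timer is simply stopped.
 */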
void
lqr_Stop(struct physical *physical, int method)
{
  if (method == LQM_LQR)
    log_Printf(LogLQM, "%s: Stop sending LQR, Use LCP ECHO instead.\n",
               physical->link.name);
  if (method == LQM_ECHO)
    log_Printf(LogLQM, "%s: Stop sending LCP ECHO.\n",
               physical->link.name);
  physical->hdlc.lqm.method &= ~method;
  if (physical->hdlc.lqm.method)
    SendLqrReport(physical->hdlc.lqm.owner);
  else
    timer_Stop(&physical->hdlc.lqm.timer);
}

void
lqr_Dump(const char *link, const char *message, const struct lqrdata *lqr)
{
  if (log_IsKept(LogLQM)) {
    log_Printf(LogLQM, "%s: %s:\n", link, message);
    log_Printf(LogLQM, "  Magic:          %08x   LastOutLQRs:    %08x\n",
               lqr->MagicNumber, lqr->LastOutLQRs);
    log_Printf(LogLQM, "  LastOutPackets: %08x   LastOutOctets:  %08x\n",
               lqr->LastOutPackets, lqr->LastOutOctets);
    log_Printf(LogLQM, "  PeerInLQRs:     %08x   PeerInPackets:  %08x\n",
               lqr->PeerInLQRs, lqr->PeerInPackets);
    log_Printf(LogLQM, "  PeerInDiscards: %08x   PeerInErrors:   %08x\n",
               lqr->PeerInDiscards, lqr->PeerInErrors);
    log_Printf(LogLQM, "  PeerInOctets:   %08x   PeerOutLQRs:    %08x\n",
               lqr->PeerInOctets, lqr->PeerOutLQRs);
    log_Printf(LogLQM, "  PeerOutPackets: %08x   PeerOutOctets:  %08x\n",
               lqr->PeerOutPackets, lqr->PeerOutOctets);
  }
}

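/*
 * The push half of the LQR layer.  Every outgoing packet is counted in
 * OutOctets/OutPackets (including the per-frame overhead described in the
 * RFC 1989 excerpt below), and an outgoing PROTO_LQR packet has its
 * payload - the placeholder queued by SendLqrData() - overwritten here
 * with the current counters before being handed down the stack.
 */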
static struct mbuf *
lqr_LayerPush(struct bundle *b __unused, struct link *l, struct mbuf *bp,
              int pri __unused, u_short *proto)
{
  struct physical *p = link2physical(l);
  int len;

  if (!p) {
    /* Oops - can't happen :-] */
    m_freem(bp);
    return NULL;
  }

  /*
   * From rfc1989:
   *
   *  All octets which are included in the FCS calculation MUST be counted,
   *  including the packet header, the information field, and any padding.
   *  The FCS octets MUST also be counted, and one flag octet per frame
   *  MUST be counted.  All other octets (such as additional flag
   *  sequences, and escape bits or octets) MUST NOT be counted.
   *
   * As we're stacked before the HDLC layer (otherwise HDLC wouldn't be
   * able to calculate the FCS), we must not forget about these additional
   * bytes when we're asynchronous.
   *
   * We're also expecting to be stacked *before* the proto and acf layers.
   * If we were after these, alignment would be more of a pain, and we
   * don't do LQR without these layers.
   */

  bp = m_pullup(bp);
  len = m_length(bp);

  if (!physical_IsSync(p))
    p->hdlc.lqm.OutOctets += hdlc_WrapperOctets();
  p->hdlc.lqm.OutOctets += acf_WrapperOctets(&l->lcp, *proto) +
                           proto_WrapperOctets(&l->lcp, *proto) + len + 1;
  p->hdlc.lqm.OutPackets++;

  if (*proto == PROTO_LQR) {
    /* Overwrite the entire packet (created in SendLqrData()) */
    struct lqrdata lqr;

    lqr.MagicNumber = p->link.lcp.want_magic;
    lqr.LastOutLQRs = p->hdlc.lqm.lqr.peer.PeerOutLQRs;
    lqr.LastOutPackets = p->hdlc.lqm.lqr.peer.PeerOutPackets;
    lqr.LastOutOctets = p->hdlc.lqm.lqr.peer.PeerOutOctets;
    lqr.PeerInLQRs = p->hdlc.lqm.lqr.SaveInLQRs;
    lqr.PeerInPackets = p->hdlc.lqm.SaveInPackets;
    lqr.PeerInDiscards = p->hdlc.lqm.SaveInDiscards;
    lqr.PeerInErrors = p->hdlc.lqm.SaveInErrors;
    lqr.PeerInOctets = p->hdlc.lqm.SaveInOctets;
    lqr.PeerOutPackets = p->hdlc.lqm.OutPackets;
    lqr.PeerOutOctets = p->hdlc.lqm.OutOctets;
    if (p->hdlc.lqm.lqr.peer.LastOutLQRs == p->hdlc.lqm.lqr.OutLQRs) {
      /*
       * only increment if it's the first time or we've got a reply
       * from the last one
       */
      lqr.PeerOutLQRs = ++p->hdlc.lqm.lqr.OutLQRs;
      lqr_Dump(l->name, "Output", &lqr);
    } else {
      lqr.PeerOutLQRs = p->hdlc.lqm.lqr.OutLQRs;
      lqr_Dump(l->name, "Output (again)", &lqr);
    }
    lqr_ChangeOrder(&lqr, (struct lqrdata *)MBUF_CTOP(bp));
  }

  return bp;
}

static struct mbuf *
lqr_LayerPull(struct bundle *b __unused, struct link *l __unused,
              struct mbuf *bp, u_short *proto)
{
  /*
   * We mark the packet as ours but don't do anything 'till it's dispatched
   * to lqr_Input()
   */
  if (*proto == PROTO_LQR)
    m_settype(bp, MB_LQRIN);
  return bp;
}

/*
 * Statistics for pulled packets are recorded either in hdlc_PullPacket()
 * or sync_PullPacket()
 */
struct layer lqrlayer = { LAYER_LQR, "lqr", lqr_LayerPush, lqr_LayerPull };