Staging: rtl8192e: Add #include <linux/vmalloc.h>
[linux-2.6/mini2440.git] / drivers / isdn / mISDN / layer2.c
blobe17f0044e0b6d2869175c5faed183df60bab074f
1 /*
3 * Author Karsten Keil <kkeil@novell.com>
5 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #include <linux/mISDNif.h>
19 #include "core.h"
20 #include "fsm.h"
21 #include "layer2.h"
/* Points to the module-wide debug bitmask (set up by the core init code). */
static u_int *debug;
/* Shared layer-2 FSM description; state/event tables are filled at init. */
static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
/* Human-readable names of the layer-2 FSM states (indexed by state number). */
static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};
/*
 * Events of the layer-2 state machine: received frame types (UI..I),
 * requests from layer 3 (DL_*), TEI management (MDL_*), layer-1
 * notifications and the T200/T203 timer expiries.
 */
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNITDATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};
65 #define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)
/* Human-readable names for the events above (must stay in enum order). */
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNITDATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};
/*
 * FSM debug callback: print one debug line prefixed with the link's
 * SAPI/TEI.  Only active when DEBUG_L2_FSM is set in the debug mask.
 */
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
	struct layer2 *l2 = fi->userdata;
	va_list va;

	if (!(*debug & DEBUG_L2_FSM))
		return;
	va_start(va, fmt);
	printk(KERN_DEBUG "l2 (sapi %d tei %d): ", l2->sapi, l2->tei);
	vprintk(fmt, va);
	printk("\n");
	va_end(va);
}
108 inline u_int
109 l2headersize(struct layer2 *l2, int ui)
111 return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
112 (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
115 inline u_int
116 l2addrsize(struct layer2 *l2)
118 return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
/*
 * Build a fresh message id: a wrapping 15-bit sequence number in the
 * upper half, TEI and SAPI of this link in the lower half.
 */
static u_int
l2_newid(struct layer2 *l2)
{
	u_int	id;

	id = l2->next_id++;
	if (id == 0x7fff)	/* wrap before the sign bit of the 16-bit field */
		l2->next_id = 1;
	id <<= 16;
	id |= l2->tei << 8;
	id |= l2->sapi;
	return id;
}
/*
 * Deliver an skb to the upper layer with the given primitive; the skb is
 * consumed here (freed on send error).  Silently dropped when no upper
 * layer is attached.
 */
static void
l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
{
	int	err;

	if (!l2->up)
		return;
	mISDN_HEAD_PRIM(skb) = prim;
	mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
		dev_kfree_skb(skb);
	}
}
/*
 * Allocate a new message of 'len' bytes (copied from arg when non-zero)
 * and send it to the upper layer with the given primitive.
 */
static void
l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
{
	struct sk_buff	*skb;
	struct mISDNhead *hh;
	int		err;

	if (!l2->up)
		return;
	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = (l2->ch.nr << 16) | l2->ch.addr;
	if (len)
		memcpy(skb_put(skb, len), arg, len);
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
		dev_kfree_skb(skb);
	}
}
175 static int
176 l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
177 int ret;
179 ret = l2->ch.recv(l2->ch.peer, skb);
180 if (ret && (*debug & DEBUG_L2_RECV))
181 printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
182 return ret;
/*
 * Send a frame towards layer 1.  PH_DATA_REQ frames are flow controlled:
 * while a transmission is outstanding (FLG_L1_NOTREADY) the frame is
 * queued and sent later from ph_data_confirm().
 */
static int
l2down_raw(struct layer2 *l2, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	if (hh->prim == PH_DATA_REQ) {
		if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
			skb_queue_tail(&l2->down_queue, skb);
			return 0;
		}
		/* remember the id so the confirm can be matched */
		l2->down_id = mISDN_HEAD_ID(skb);
	}
	return l2down_skb(l2, skb);
}
/* Stamp primitive and id into the skb header and send it downward. */
static int
l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	hh->prim = prim;
	hh->id = id;
	return l2down_raw(l2, skb);
}
/*
 * Allocate a new downward message of 'len' bytes (copied from arg when
 * non-zero) and send it; the skb is freed here on failure.
 */
static int
l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
{
	struct sk_buff	*skb;
	int		err;
	struct mISDNhead *hh;

	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = id;
	if (len)
		memcpy(skb_put(skb, len), arg, len);
	err = l2down_raw(l2, skb);
	if (err)
		dev_kfree_skb(skb);
	return err;
}
/*
 * Handle a PH_DATA confirm from layer 1: the outstanding transmission is
 * acknowledged, so push the next queued frame (if any) down and, once the
 * queue drains, clear FLG_L1_NOTREADY and kick the FSM to pull more
 * I frames.  Returns 0 if the confirm skb was consumed, -EAGAIN otherwise.
 */
static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
	struct sk_buff *nskb = skb;
	int ret = -EAGAIN;

	if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
		if (hh->id == l2->down_id) {
			nskb = skb_dequeue(&l2->down_queue);
			if (nskb) {
				l2->down_id = mISDN_HEAD_ID(nskb);
				if (l2down_skb(l2, nskb)) {
					dev_kfree_skb(nskb);
					l2->down_id = MISDN_ID_NONE;
				}
			} else
				l2->down_id = MISDN_ID_NONE;
			if (ret) {
				/* matched confirm: consume the skb */
				dev_kfree_skb(skb);
				ret = 0;
			}
			if (l2->down_id == MISDN_ID_NONE) {
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
				mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
			}
		}
	}
	/* restart transmission if the queue filled up meanwhile */
	if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
		nskb = skb_dequeue(&l2->down_queue);
		if (nskb) {
			l2->down_id = mISDN_HEAD_ID(nskb);
			if (l2down_skb(l2, nskb)) {
				dev_kfree_skb(nskb);
				l2->down_id = MISDN_ID_NONE;
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
			}
		} else
			test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
	}
	return ret;
}
/*
 * Report a protocol error (one-letter code per Q.921) to the management
 * entity; for LAPD links with automatic TEI some errors are forwarded to
 * the TEI manager.
 */
static int
l2mgr(struct layer2 *l2, u_int prim, void *arg) {
	long c = (long)arg;

	printk(KERN_WARNING
	    "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
	if (test_bit(FLG_LAPD, &l2->flag) &&
		!test_bit(FLG_FIXED_TEI, &l2->flag)) {
		switch (c) {
		case 'C':
		case 'D':
		case 'G':
		case 'H':
			l2_tei(l2, prim, (u_long)arg);
			break;
		}
	}
	return 0;
}
292 static void
293 set_peer_busy(struct layer2 *l2) {
294 test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
295 if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
296 test_and_set_bit(FLG_L2BLOCK, &l2->flag);
299 static void
300 clear_peer_busy(struct layer2 *l2) {
301 if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
302 test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
305 static void
306 InitWin(struct layer2 *l2)
308 int i;
310 for (i = 0; i < MAX_WINDOW; i++)
311 l2->windowar[i] = NULL;
314 static int
315 freewin(struct layer2 *l2)
317 int i, cnt = 0;
319 for (i = 0; i < MAX_WINDOW; i++) {
320 if (l2->windowar[i]) {
321 cnt++;
322 dev_kfree_skb(l2->windowar[i]);
323 l2->windowar[i] = NULL;
326 return cnt;
329 static void
330 ReleaseWin(struct layer2 *l2)
332 int cnt = freewin(l2);
334 if (cnt)
335 printk(KERN_WARNING
336 "isdnl2 freed %d skbuffs in release\n", cnt);
339 inline unsigned int
340 cansend(struct layer2 *l2)
342 unsigned int p1;
344 if (test_bit(FLG_MOD128, &l2->flag))
345 p1 = (l2->vs - l2->va) % 128;
346 else
347 p1 = (l2->vs - l2->va) % 8;
348 return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
351 inline void
352 clear_exception(struct layer2 *l2)
354 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
355 test_and_clear_bit(FLG_REJEXC, &l2->flag);
356 test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
357 clear_peer_busy(l2);
/*
 * Write the address field into 'header' and return its length.
 * The C/R bit depends on command/response (rsp) and on which side of the
 * link we are (network/TE for LAPD, DCE/DTE via FLG_ORIG for LAPB).
 */
static int
sethdraddr(struct layer2 *l2, u_char *header, int rsp)
{
	u_char *ptr = header;
	int crbit = rsp;

	if (test_bit(FLG_LAPD, &l2->flag)) {
		if (test_bit(FLG_LAPD_NET, &l2->flag))
			crbit = !crbit;	/* network side inverts C/R */
		*ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
		*ptr++ = (l2->tei << 1) | 1;	/* EA bit set */
		return 2;
	} else {
		if (test_bit(FLG_ORIG, &l2->flag))
			crbit = !crbit;
		if (crbit)
			*ptr++ = l2->addr.B;
		else
			*ptr++ = l2->addr.A;
		return 1;
	}
}
383 static inline void
384 enqueue_super(struct layer2 *l2, struct sk_buff *skb)
386 if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
387 dev_kfree_skb(skb);
390 static inline void
391 enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
393 if (l2->tm)
394 l2_tei(l2, MDL_STATUS_UI_IND, 0);
395 if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
396 dev_kfree_skb(skb);
399 inline int
400 IsUI(u_char *data)
402 return (data[0] & 0xef) == UI;
405 inline int
406 IsUA(u_char *data)
408 return (data[0] & 0xef) == UA;
411 inline int
412 IsDM(u_char *data)
414 return (data[0] & 0xef) == DM;
417 inline int
418 IsDISC(u_char *data)
420 return (data[0] & 0xef) == DISC;
423 inline int
424 IsRR(u_char *data, struct layer2 *l2)
426 if (test_bit(FLG_MOD128, &l2->flag))
427 return data[0] == RR;
428 else
429 return (data[0] & 0xf) == 1;
432 inline int
433 IsSFrame(u_char *data, struct layer2 *l2)
435 register u_char d = *data;
437 if (!test_bit(FLG_MOD128, &l2->flag))
438 d &= 0xf;
439 return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
442 inline int
443 IsSABME(u_char *data, struct layer2 *l2)
445 u_char d = data[0] & ~0x10;
447 return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
450 inline int
451 IsREJ(u_char *data, struct layer2 *l2)
453 return test_bit(FLG_MOD128, &l2->flag) ?
454 data[0] == REJ : (data[0] & 0xf) == REJ;
457 inline int
458 IsFRMR(u_char *data)
460 return (data[0] & 0xef) == FRMR;
463 inline int
464 IsRNR(u_char *data, struct layer2 *l2)
466 return test_bit(FLG_MOD128, &l2->flag) ?
467 data[0] == RNR : (data[0] & 0xf) == RNR;
/*
 * Validate a received I frame.  Returns 0 when OK or a Q.921 error
 * letter: 'L' (I frame coded as response), 'N' (too short to hold the
 * header), 'O' (information field exceeds the negotiated maximum).
 */
static int
iframe_error(struct layer2 *l2, struct sk_buff *skb)
{
	u_int	i;
	int	rsp = *skb->data & 0x2;

	i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (rsp)
		return 'L';	/* I frames must be commands */
	if (skb->len < i)
		return 'N';
	if ((skb->len - i) > l2->maxlen)
		return 'O';
	return 0;
}
488 static int
489 super_error(struct layer2 *l2, struct sk_buff *skb)
491 if (skb->len != l2addrsize(l2) +
492 (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
493 return 'N';
494 return 0;
/*
 * Validate an unnumbered frame: the C/R bit must match 'wantrsp' and the
 * frame must be exactly address + one control octet.
 * Returns 0 when OK, 'L' for a wrong C/R bit, 'N' for a bad length.
 */
static int
unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
{
	int rsp = (*skb->data & 0x2) >> 1;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (rsp != wantrsp)
		return 'L';
	if (skb->len != l2addrsize(l2) + 1)
		return 'N';
	return 0;
}
/*
 * Validate a UI frame: must be a command ('L' otherwise) and must not
 * exceed the maximum information field length ('O').
 */
static int
UI_error(struct layer2 *l2, struct sk_buff *skb)
{
	int rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (rsp)
		return 'L';
	if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
		return 'O';
	return 0;
}
/*
 * Validate a FRMR frame: must be a response ('L') and carry the full
 * information field (5 octets for modulo 128, 3 for modulo 8 — 'N').
 * The information octets are dumped when L2 debugging is enabled.
 */
static int
FRMR_error(struct layer2 *l2, struct sk_buff *skb)
{
	u_int	headers = l2addrsize(l2) + 1;
	u_char	*datap = skb->data + headers;
	int	rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (!rsp)
		return 'L';
	if (test_bit(FLG_MOD128, &l2->flag)) {
		if (skb->len < headers + 5)
			return 'N';
		else if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m,
			    "FRMR information %2x %2x %2x %2x %2x",
			    datap[0], datap[1], datap[2], datap[3], datap[4]);
	} else {
		if (skb->len < headers + 3)
			return 'N';
		else if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m,
			    "FRMR information %2x %2x %2x",
			    datap[0], datap[1], datap[2]);
	}
	return 0;
}
552 static unsigned int
553 legalnr(struct layer2 *l2, unsigned int nr)
555 if (test_bit(FLG_MOD128, &l2->flag))
556 return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
557 else
558 return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
/*
 * Advance v(a) to nr, releasing every now-acknowledged frame from the
 * transmit window; freed skbs are staged on tmp_queue and then released.
 */
static void
setva(struct layer2 *l2, unsigned int nr)
{
	struct sk_buff	*skb;

	while (l2->va != nr) {
		l2->va++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		if (l2->windowar[l2->sow]) {
			skb_trim(l2->windowar[l2->sow], 0);
			skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
			l2->windowar[l2->sow] = NULL;
		}
		l2->sow = (l2->sow + 1) % l2->window;
	}
	skb = skb_dequeue(&l2->tmp_queue);
	while (skb) {
		dev_kfree_skb(skb);
		skb = skb_dequeue(&l2->tmp_queue);
	}
}
/*
 * Build and transmit an unnumbered frame with control field 'cmd'.
 * An existing skb may be passed for reuse (it is emptied first);
 * otherwise a fresh one is allocated.
 */
static void
send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
{
	u_char tmp[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, tmp, cr);
	tmp[i++] = cmd;
	if (skb)
		skb_trim(skb, 0);
	else {
		skb = mI_alloc_skb(i, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING "%s: can't alloc skbuff\n",
				__func__);
			return;
		}
	}
	memcpy(skb_put(skb, i), tmp, i);
	enqueue_super(l2, skb);
}
609 inline u_char
610 get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
612 return skb->data[l2addrsize(l2)] & 0x10;
615 inline u_char
616 get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
618 u_char PF;
620 PF = get_PollFlag(l2, skb);
621 dev_kfree_skb(skb);
622 return PF;
/* Start the retransmission timer T200 and note that it is running. */
inline void
start_t200(struct layer2 *l2, int i)
{
	mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
/* Restart T200 (resets an already running timer) and mark it running. */
inline void
restart_t200(struct layer2 *l2, int i)
{
	mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
/* Stop T200 if it is running; 'i' tags the caller for timer debugging. */
inline void
stop_t200(struct layer2 *l2, int i)
{
	if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
		mISDN_FsmDelTimer(&l2->t200, i);
}
646 inline void
647 st5_dl_release_l2l3(struct layer2 *l2)
649 int pr;
651 if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
652 pr = DL_RELEASE_CNF;
653 else
654 pr = DL_RELEASE_IND;
655 l2up_create(l2, pr, 0, NULL);
/*
 * Signal release primitive 'f' to layer 3; LAPB links also deactivate
 * the physical layer.
 */
inline void
lapb_dl_release_l2l3(struct layer2 *l2, int f)
{
	if (test_bit(FLG_LAPB, &l2->flag))
		l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
	l2up_create(l2, f, 0, NULL);
}
/*
 * Start (re)establishment: send SABM(E) with P=1, arm T200, drop the
 * old window contents and enter the awaiting-establishment state.
 */
static void
establishlink(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;
	u_char cmd;

	clear_exception(l2);
	l2->rc = 0;	/* reset the retransmission counter */
	cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
	send_uframe(l2, NULL, cmd, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 1);
	test_and_clear_bit(FLG_PEND_REL, &l2->flag);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_5);
}
683 static void
684 l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
686 struct sk_buff *skb = arg;
687 struct layer2 *l2 = fi->userdata;
689 if (get_PollFlagFree(l2, skb))
690 l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
691 else
692 l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
/*
 * Unsolicited DM received: report error 'B' (F=1), or 'E' (F=0) and
 * re-establish the link.
 */
static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct layer2 *l2 = fi->userdata;

	if (get_PollFlagFree(l2, skb))
		l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
	else {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	}
}
/*
 * DM in the timer-recovery state: report 'B' or 'E' and always
 * re-establish the link.
 */
static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct layer2 *l2 = fi->userdata;

	if (get_PollFlagFree(l2, skb))
		l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
	else
		l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
725 static void
726 l2_go_st3(struct FsmInst *fi, int event, void *arg)
728 dev_kfree_skb((struct sk_buff *)arg);
729 mISDN_FsmChangeState(fi, ST_L2_3);
732 static void
733 l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
735 struct layer2 *l2 = fi->userdata;
737 mISDN_FsmChangeState(fi, ST_L2_3);
738 dev_kfree_skb((struct sk_buff *)arg);
739 l2_tei(l2, MDL_ASSIGN_IND, 0);
742 static void
743 l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
745 struct layer2 *l2 = fi->userdata;
746 struct sk_buff *skb = arg;
748 skb_queue_tail(&l2->ui_queue, skb);
749 mISDN_FsmChangeState(fi, ST_L2_2);
750 l2_tei(l2, MDL_ASSIGN_IND, 0);
753 static void
754 l2_queue_ui(struct FsmInst *fi, int event, void *arg)
756 struct layer2 *l2 = fi->userdata;
757 struct sk_buff *skb = arg;
759 skb_queue_tail(&l2->ui_queue, skb);
/*
 * Flush the UI queue: prepend address + UI control field to each queued
 * skb and transmit it.  On the network side the group TEI 127 is used.
 */
static void
tx_ui(struct layer2 *l2)
{
	struct sk_buff *skb;
	u_char header[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, header, CMD);
	if (test_bit(FLG_LAPD_NET, &l2->flag))
		header[1] = 0xff; /* tei 127 */
	header[i++] = UI;
	while ((skb = skb_dequeue(&l2->ui_queue))) {
		memcpy(skb_push(skb, i), header, i);
		enqueue_ui(l2, skb);
	}
}
779 static void
780 l2_send_ui(struct FsmInst *fi, int event, void *arg)
782 struct layer2 *l2 = fi->userdata;
783 struct sk_buff *skb = arg;
785 skb_queue_tail(&l2->ui_queue, skb);
786 tx_ui(l2);
/*
 * Received UI frame: strip the header and hand the payload to layer 3
 * as DL_UNITDATA; the TEI manager is notified when active.
 */
static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2headersize(l2, 1));
/*
 *		in states 1-3 for broadcast
 */
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UI_IND, 0);
	l2up(l2, DL_UNITDATA_IND, skb);
}
805 static void
806 l2_establish(struct FsmInst *fi, int event, void *arg)
808 struct sk_buff *skb = arg;
809 struct layer2 *l2 = fi->userdata;
811 establishlink(fi);
812 test_and_set_bit(FLG_L3_INIT, &l2->flag);
813 dev_kfree_skb(skb);
816 static void
817 l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
819 struct sk_buff *skb = arg;
820 struct layer2 *l2 = fi->userdata;
822 skb_queue_purge(&l2->i_queue);
823 test_and_set_bit(FLG_L3_INIT, &l2->flag);
824 test_and_clear_bit(FLG_PEND_REL, &l2->flag);
825 dev_kfree_skb(skb);
828 static void
829 l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
831 struct sk_buff *skb = arg;
832 struct layer2 *l2 = fi->userdata;
834 skb_queue_purge(&l2->i_queue);
835 establishlink(fi);
836 test_and_set_bit(FLG_L3_INIT, &l2->flag);
837 dev_kfree_skb(skb);
840 static void
841 l2_release(struct FsmInst *fi, int event, void *arg)
843 struct layer2 *l2 = fi->userdata;
844 struct sk_buff *skb = arg;
846 skb_trim(skb, 0);
847 l2up(l2, DL_RELEASE_CNF, skb);
850 static void
851 l2_pend_rel(struct FsmInst *fi, int event, void *arg)
853 struct sk_buff *skb = arg;
854 struct layer2 *l2 = fi->userdata;
856 test_and_set_bit(FLG_PEND_REL, &l2->flag);
857 dev_kfree_skb(skb);
/*
 * Start link release: drop pending I frames and the window, send DISC
 * with P=1, arm T200 and enter the awaiting-release state.  May be
 * called with arg == NULL (internal use from l2_connected()).
 */
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_6);
	l2->rc = 0;
	send_uframe(l2, NULL, DISC | 0x10, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 2);
	if (skb)
		dev_kfree_skb(skb);
}
/*
 * SABM(E) received while released: reset all sequence variables,
 * answer with UA, enter the established state, start T203 and tell
 * layer 3 (reusing the emptied skb for the indication).
 */
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2	*l2 = fi->userdata;
	struct sk_buff	*skb = arg;

	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	clear_exception(l2);
	send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	skb_trim(skb, 0);
	l2up(l2, DL_ESTABLISH_IND, skb);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
897 static void
898 l2_send_UA(struct FsmInst *fi, int event, void *arg)
900 struct layer2 *l2 = fi->userdata;
901 struct sk_buff *skb = arg;
903 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
906 static void
907 l2_send_DM(struct FsmInst *fi, int event, void *arg)
909 struct layer2 *l2 = fi->userdata;
910 struct sk_buff *skb = arg;
912 send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
/*
 * SABM(E) received while already established (peer re-establishment):
 * answer UA, report error 'F', reset sequence variables and restart
 * timers.  If data was outstanding layer 3 gets a new establish
 * indication and the I queue is purged.
 */
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2	*l2 = fi->userdata;
	struct sk_buff	*skb = arg;
	int		est = 0;

	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);

	l2mgr(l2, MDL_ERROR_IND, (void *) 'F');

	if (l2->vs != l2->va) {
		/* unacknowledged frames are lost by the reset */
		skb_queue_purge(&l2->i_queue);
		est = 1;
	}

	clear_exception(l2);
	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	stop_t200(l2, 3);
	mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);

	if (est)
		l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *	    MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
 *	    0, NULL, 0);
 */
	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
/*
 * DISC received while established: stop timers, answer UA, drop all
 * pending data and signal the release to layer 3.
 */
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2	*l2 = fi->userdata;
	struct sk_buff	*skb = arg;

	mISDN_FsmChangeState(fi, ST_L2_4);
	mISDN_FsmDelTimer(&l2->t203, 3);
	stop_t200(l2, 4);

	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * UA received while awaiting establishment: the link is up.  Without
 * the F bit it is an unsolicited UA (error).  Reset sequence variables,
 * start T203 and confirm/indicate establishment to layer 3; a release
 * that was requested meanwhile is started immediately.
 */
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct layer2	*l2 = fi->userdata;
	struct sk_buff	*skb = arg;
	int		pr = -1;

	if (!get_PollFlag(l2, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
		l2_disconnect(fi, event, NULL);
	if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
		pr = DL_ESTABLISH_CNF;
	} else if (l2->vs != l2->va) {
		skb_queue_purge(&l2->i_queue);
		pr = DL_ESTABLISH_IND;
	}
	stop_t200(l2, 5);
	l2->vr = 0;
	l2->vs = 0;
	l2->va = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		l2up_create(l2, pr, 0, NULL);

	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);

	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
/*
 * UA received while awaiting release: release complete.  Without the
 * F bit it is an unsolicited UA (error).
 */
static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlag(l2, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	stop_t200(l2, 6);
	lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * DM with F=0 received while awaiting establishment: the peer is in
 * the released state, so restart establishment (L3 initiated).
 */
static void
l2_reestablish(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlagFree(l2, skb)) {
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &l2->flag);
	}
}
/*
 * DM with F=1 while awaiting establishment: the peer refuses the
 * connection — stop T200, drop pending data (unless L3 initiated),
 * signal release and return to the released state.
 */
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(l2, skb)) {
		stop_t200(l2, 7);
		if (!test_bit(FLG_L3_INIT, &l2->flag))
			skb_queue_purge(&l2->i_queue);
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		mISDN_FsmChangeState(fi, ST_L2_4);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	}
}
/*
 * DM with F=1 while awaiting release: treat as release complete.
 */
static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(l2, skb)) {
		stop_t200(l2, 8);
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		mISDN_FsmChangeState(fi, ST_L2_4);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	}
}
/*
 * Build and transmit a supervisory frame of type 'typ' (RR/RNR/REJ)
 * carrying the current v(r) and the given P/F bit.
 */
static void
enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
{
	struct sk_buff *skb;
	u_char tmp[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, tmp, cr);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		tmp[i++] = typ;
		tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
	} else
		tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
	skb = mI_alloc_skb(i, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_WARNING
		    "isdnl2 can't alloc sbbuff for enquiry_cr\n");
		return;
	}
	memcpy(skb_put(skb, i), tmp, i);
	enqueue_super(l2, skb);
}
1093 inline void
1094 enquiry_response(struct layer2 *l2)
1096 if (test_bit(FLG_OWN_BUSY, &l2->flag))
1097 enquiry_cr(l2, RNR, RSP, 1);
1098 else
1099 enquiry_cr(l2, RR, RSP, 1);
1100 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1103 inline void
1104 transmit_enquiry(struct layer2 *l2)
1106 if (test_bit(FLG_OWN_BUSY, &l2->flag))
1107 enquiry_cr(l2, RNR, CMD, 1);
1108 else
1109 enquiry_cr(l2, RR, CMD, 1);
1110 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1111 start_t200(l2, 9);
1115 static void
1116 nrerrorrecovery(struct FsmInst *fi)
1118 struct layer2 *l2 = fi->userdata;
1120 l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
1121 establishlink(fi);
1122 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
/*
 * Roll v(s) back to nr and push all frames in between from the window
 * back onto the head of the I queue, then kick the FSM to resend them.
 */
static void
invoke_retransmission(struct layer2 *l2, unsigned int nr)
{
	u_int p1;

	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;
			if (test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			p1 = (p1 + l2->sow) % l2->window;
			if (l2->windowar[p1])
				skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			else
				printk(KERN_WARNING
				    "%s: windowar[%d] is NULL\n",
				    __func__, p1);
			l2->windowar[p1] = NULL;
		}
		mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
	}
}
/*
 * Supervisory frame (RR/RNR/REJ) received in the established state:
 * track peer busy state, answer polls, and on a valid N(R) acknowledge
 * frames (REJ triggers retransmission, RNR/late ack restarts T200).
 * An invalid N(R) forces re-establishment.
 */
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;

	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, l2))
		typ = REJ;

	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);

	if (PollFlag) {
		if (rsp)
			l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
		else
			enquiry_response(l2);
	}
	if (legalnr(l2, nr)) {
		if (typ == REJ) {
			setva(l2, nr);
			invoke_retransmission(l2, nr);
			stop_t200(l2, 10);
			if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
					EV_L2_T203, NULL, 6))
				l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			/* everything acknowledged: idle with T203 */
			setva(l2, nr);
			stop_t200(l2, 11);
			mISDN_FsmRestartTimer(&l2->t203, l2->T203,
					EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			setva(l2, nr);
			if (typ != RR)
				mISDN_FsmDelTimer(&l2->t203, 9);
			restart_t200(l2, 12);
		}
		if (skb_queue_len(&l2->i_queue) && (typ == RR))
			mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	} else
		nrerrorrecovery(fi);
}
1214 static void
1215 l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
1217 struct layer2 *l2 = fi->userdata;
1218 struct sk_buff *skb = arg;
1220 if (!test_bit(FLG_L3_INIT, &l2->flag))
1221 skb_queue_tail(&l2->i_queue, skb);
1222 else
1223 dev_kfree_skb(skb);
1226 static void
1227 l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
1229 struct layer2 *l2 = fi->userdata;
1230 struct sk_buff *skb = arg;
1232 skb_queue_tail(&l2->i_queue, skb);
1233 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1236 static void
1237 l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
1239 struct layer2 *l2 = fi->userdata;
1240 struct sk_buff *skb = arg;
1242 skb_queue_tail(&l2->i_queue, skb);
/*
 * I frame received: when in-sequence (n(s) == v(r)) deliver the payload
 * to layer 3 and schedule an acknowledge; out-of-sequence frames are
 * dropped and answered with a single REJ (reject exception).  The
 * piggybacked N(R) acknowledges our transmit window; an invalid N(R)
 * forces re-establishment.
 */
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	struct layer2	*l2 = fi->userdata;
	struct sk_buff	*skb = arg;
	int		PollFlag, i;
	u_int		ns, nr;

	i = l2addrsize(l2);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		/* own receiver busy: discard, but still answer a poll */
		dev_kfree_skb(skb);
		if (PollFlag)
			enquiry_response(l2);
	} else {
		if (l2->vr == ns) {
			l2->vr++;
			if (test_bit(FLG_MOD128, &l2->flag))
				l2->vr %= 128;
			else
				l2->vr %= 8;
			test_and_clear_bit(FLG_REJEXC, &l2->flag);
			if (PollFlag)
				enquiry_response(l2);
			else
				test_and_set_bit(FLG_ACK_PEND, &l2->flag);
			skb_pull(skb, l2headersize(l2, 0));
			l2up(l2, DL_DATA_IND, skb);
		} else {
			/* n(s)!=v(r) */
			dev_kfree_skb(skb);
			if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
				if (PollFlag)
					enquiry_response(l2);
			} else {
				enquiry_cr(l2, REJ, RSP, PollFlag);
				test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
			}
		}
	}
	if (legalnr(l2, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
		    (fi->state == ST_L2_7)) {
			if (nr == l2->vs) {
				stop_t200(l2, 13);
				mISDN_FsmRestartTimer(&l2->t203, l2->T203,
						EV_L2_T203, NULL, 7);
			} else if (nr != l2->va)
				restart_t200(l2, 14);
		}
		setva(l2, nr);
	} else {
		nrerrorrecovery(fi);
		return;
	}
	if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
		enquiry_cr(l2, RR, RSP, 0);
}
/*
 * TEI assigned by the TEI manager: adopt the address, notify layer 3,
 * and either start establishment (if it was pending) or go to the
 * released state; queued UI frames can now be transmitted.
 */
static void
l2_got_tei(struct FsmInst *fi, int event, void *arg)
{
	struct layer2	*l2 = fi->userdata;
	u_int		info;

	l2->tei = (signed char)(long)arg;
	set_channel_address(&l2->ch, l2->sapi, l2->tei);
	info = DL_INFO_L2_CONNECT;
	l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
	if (fi->state == ST_L2_3) {
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &l2->flag);
	} else
		mISDN_FsmChangeState(fi, ST_L2_4);
	if (skb_queue_len(&l2->ui_queue))
		tx_ui(l2);
}
/*
 * T200 expiry while awaiting establishment: retransmit SABM(E) until
 * N200 retries are exhausted, then give up and signal release.  While
 * the D channel is busy the timer is simply re-armed.
 */
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
		test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		skb_queue_purge(&l2->i_queue);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
			SABME : SABM) | 0x10, CMD);
	}
}
/*
 * T200 expiry while awaiting release: retransmit DISC until N200
 * retries are exhausted, then report error 'H' and release anyway.
 */
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
		test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
			    NULL, 9);
		send_uframe(l2, NULL, DISC | 0x10, CMD);
	}
}
/*
 * T200 expiry in the established state: enter timer recovery and poll
 * the peer (first of up to N200 enquiries).
 */
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
		test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	l2->rc = 0;
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);
	l2->rc++;
}
/*
 * T200 expiry in timer recovery: re-poll the peer, or after N200
 * retries report error 'I' and re-establish the link.
 */
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
		test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	if (l2->rc == l2->N200) {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	} else {
		transmit_enquiry(l2);
		l2->rc++;
	}
}
/*
 * T203 (idle supervision) timeout while in ST_L2_7.
 * If the LAPD D-channel is busy, only rearm T203. Otherwise move to
 * timer-recovery state ST_L2_8, send an enquiry and reset the retry
 * counter.
 */
static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
		return;
	}
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);
	l2->rc = 0;
}
1436 static void
1437 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1439 struct layer2 *l2 = fi->userdata;
1440 struct sk_buff *skb, *nskb, *oskb;
1441 u_char header[MAX_L2HEADER_LEN];
1442 u_int i, p1;
1444 if (!cansend(l2))
1445 return;
1447 skb = skb_dequeue(&l2->i_queue);
1448 if (!skb)
1449 return;
1451 if (test_bit(FLG_MOD128, &l2->flag))
1452 p1 = (l2->vs - l2->va) % 128;
1453 else
1454 p1 = (l2->vs - l2->va) % 8;
1455 p1 = (p1 + l2->sow) % l2->window;
1456 if (l2->windowar[p1]) {
1457 printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
1458 p1);
1459 dev_kfree_skb(l2->windowar[p1]);
1461 l2->windowar[p1] = skb;
1462 i = sethdraddr(l2, header, CMD);
1463 if (test_bit(FLG_MOD128, &l2->flag)) {
1464 header[i++] = l2->vs << 1;
1465 header[i++] = l2->vr << 1;
1466 l2->vs = (l2->vs + 1) % 128;
1467 } else {
1468 header[i++] = (l2->vr << 5) | (l2->vs << 1);
1469 l2->vs = (l2->vs + 1) % 8;
1472 nskb = skb_clone(skb, GFP_ATOMIC);
1473 p1 = skb_headroom(nskb);
1474 if (p1 >= i)
1475 memcpy(skb_push(nskb, i), header, i);
1476 else {
1477 printk(KERN_WARNING
1478 "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
1479 oskb = nskb;
1480 nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
1481 if (!nskb) {
1482 dev_kfree_skb(oskb);
1483 printk(KERN_WARNING "%s: no skb mem\n", __func__);
1484 return;
1486 memcpy(skb_put(nskb, i), header, i);
1487 memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
1488 dev_kfree_skb(oskb);
1490 l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
1491 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1492 if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
1493 mISDN_FsmDelTimer(&l2->t203, 13);
1494 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
/*
 * Supervisory frame (RR/RNR/REJ) received in timer-recovery state
 * ST_L2_8. A response with the F bit set and a valid N(R) completes
 * recovery: acknowledged frames are confirmed, outstanding frames
 * are retransmitted and the FSM returns to ST_L2_7.
 */
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;

	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;	/* C/R meaning depends on who originated */

	skb_pull(skb, l2addrsize(l2));

	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);

	/* extract P/F bit and N(R) from the control field */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (rsp && PollFlag) {
		/* F-bit response: this answers our enquiry */
		if (legalnr(l2, nr)) {
			if (rnr) {
				restart_t200(l2, 15);
			} else {
				stop_t200(l2, 16);
				mISDN_FsmAddTimer(&l2->t203, l2->T203,
				    EV_L2_T203, NULL, 5);
				setva(l2, nr);
			}
			invoke_retransmission(l2, nr);
			mISDN_FsmChangeState(fi, ST_L2_7);
			if (skb_queue_len(&l2->i_queue) && cansend(l2))
				mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		/* command with P bit set requires an enquiry response */
		if (!rsp && PollFlag)
			enquiry_response(l2);
		if (legalnr(l2, nr))
			setva(l2, nr);
		else
			nrerrorrecovery(fi);
	}
}
/*
 * FRMR (frame reject) received from the peer. If the rejected frame
 * was an I- or S-frame, or a UA while established, report error 'K'
 * and re-establish the link; other cases are ignored.
 */
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2addrsize(l2) + 1);

	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) ||	/* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	}
	dev_kfree_skb(skb);
}
1569 static void
1570 l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
1572 struct layer2 *l2 = fi->userdata;
1574 skb_queue_purge(&l2->ui_queue);
1575 l2->tei = GROUP_TEI;
1576 mISDN_FsmChangeState(fi, ST_L2_1);
/*
 * TEI removed while in ST_L2_3 (establish pending, awaiting TEI):
 * discard pending UI frames, report DL_RELEASE to layer 3 and go
 * back to the TEI-unassigned state with the group TEI.
 */
static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed while in ST_L2_5 (awaiting establishment): drop all
 * queued data, free the ack window, stop T200, complete the pending
 * establish/release towards layer 3 and return to ST_L2_1.
 */
static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	st5_dl_release_l2l3(l2);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed while in ST_L2_6 (awaiting release): drop queued UI
 * frames, stop T200, signal DL_RELEASE to layer 3 and return to
 * the TEI-unassigned state.
 */
static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 18);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed while multi-frame established (ST_L2_7/ST_L2_8):
 * flush everything, stop both timers, signal DL_RELEASE to layer 3
 * and return to the TEI-unassigned state.
 */
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *		MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
 *		0, NULL, 0);
 */
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * Persistent layer-1 deactivation in ST_L2_1/ST_L2_4: flush queues
 * and, if an establish request was pending, report DL_RELEASE to
 * layer 3 (reusing the skb); otherwise free the skb.
 */
static void
l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
		l2up(l2, DL_RELEASE_IND, skb);
	else
		dev_kfree_skb(skb);
}
/*
 * Persistent layer-1 deactivation in ST_L2_5 (awaiting establish):
 * flush all state, stop T200, complete the pending establish/release
 * towards layer 3 and fall back to ST_L2_4.
 */
static void
l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	stop_t200(l2, 19);
	st5_dl_release_l2l3(l2);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	dev_kfree_skb(skb);
}
/*
 * Persistent layer-1 deactivation in ST_L2_6 (awaiting release):
 * confirm the release to layer 3 (reusing the skb) and fall back
 * to ST_L2_4.
 */
static void
l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->ui_queue);
	stop_t200(l2, 20);
	l2up(l2, DL_RELEASE_CNF, skb);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * Persistent layer-1 deactivation while established (ST_L2_7/8):
 * flush all state, stop both timers, indicate DL_RELEASE to layer 3
 * (reusing the skb) and fall back to ST_L2_4.
 */
static void
l2_persistant_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	stop_t200(l2, 19);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up(l2, DL_RELEASE_IND, skb);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * Enter own-receiver-busy condition: on the first transition send an
 * RNR response to tell the peer to stop sending I-frames.
 */
static void
l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
		enquiry_cr(l2, RNR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	}
	if (skb)
		dev_kfree_skb(skb);
}
/*
 * Leave own-receiver-busy condition and send an RR response so the
 * peer resumes sending I-frames.
 *
 * NOTE(review): the condition looks inverted (RR is sent when the
 * busy flag was already clear, not when it is being cleared), but
 * this matches the long-standing hisax/mISDN behaviour — confirm
 * before changing.
 */
static void
l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
		enquiry_cr(l2, RR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	}
	if (skb)
		dev_kfree_skb(skb);
}
/*
 * Report a frame error (error code in arg) to the management entity
 * without changing the link state.
 */
static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, arg);
}
/*
 * Report a frame error (error code in arg) to the management entity
 * and re-establish the data link.
 */
static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, arg);
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
/*
 * State/event transition table for the layer-2 FSM: each entry maps
 * (state, event) to its handler. Passed to mISDN_FsmNew() at init.
 */
static struct FsmNode L2FnList[] =
{
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	{ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da},
};
/*
 * Handle an incoming PH_DATA_IND frame from layer 1.
 * For LAPD, validate the EA bits and filter on our SAPI/TEI (group
 * TEI is also accepted); for LAPB the address is simply skipped.
 * The control field is then classified (I/S/UI/SABME/UA/DISC/DM/FRMR)
 * and, if the frame passes the per-type validity check, the matching
 * FSM event is raised with the skb as argument.
 * Returns 0 when the frame was consumed, -EINVAL otherwise.
 */
static int
ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
{
	u_char	*datap = skb->data;
	int	ret = -EINVAL;
	int	psapi, ptei;
	u_int	l;
	int	c = 0;

	l = l2addrsize(l2);
	if (skb->len <= l) {
		/* too short to even hold address + control field */
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
		return ret;
	}
	if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
		psapi = *datap++;
		ptei = *datap++;
		if ((psapi & 1) || !(ptei & 1)) {
			printk(KERN_WARNING
			    "l2 D-channel frame wrong EA0/EA1\n");
			return ret;
		}
		psapi >>= 2;
		ptei >>= 1;
		if (psapi != l2->sapi) {
			/* not our business */
			if (*debug & DEBUG_L2)
				printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
				    __func__, psapi, l2->sapi);
			dev_kfree_skb(skb);
			return 0;
		}
		if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
			/* not our business */
			if (*debug & DEBUG_L2)
				printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
				    __func__, ptei, l2->tei);
			dev_kfree_skb(skb);
			return 0;
		}
	} else
		datap += l;
	/* classify the control field and raise the matching FSM event */
	if (!(*datap & 1)) {	/* I-Frame */
		c = iframe_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
	} else if (IsSFrame(datap, l2)) {	/* S-Frame */
		c = super_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
	} else if (IsUI(datap)) {
		c = UI_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
	} else if (IsSABME(datap, l2)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
	} else if (IsUA(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
	} else if (IsDISC(datap)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
	} else if (IsDM(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
	} else if (IsFRMR(datap)) {
		c = FRMR_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
	} else
		c = 'L';	/* unknown frame type */
	if (c) {
		printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
	}
	return ret;
}
/*
 * mISDNchannel send hook: dispatch primitives arriving from the
 * layer below (PH_*) and from layer 3 (DL_*) into the layer-2 FSM.
 * Any skb not consumed by a handler (ret != 0) is freed here, so
 * the caller never has to clean up.
 */
static int
l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct layer2 *l2 = container_of(ch, struct layer2, ch);
	struct mISDNhead *hh =  mISDN_HEAD_P(skb);
	int 			ret = -EINVAL;

	if (*debug & DEBUG_L2_RECV)
		printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n",
		    __func__, hh->prim, hh->id, l2->sapi, l2->tei);
	switch (hh->prim) {
	case PH_DATA_IND:
		ret = ph_data_indication(l2, hh, skb);
		break;
	case PH_DATA_CNF:
		ret = ph_data_confirm(l2, hh, skb);
		break;
	case PH_ACTIVATE_IND:
		test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
		/* resume an establish that waited for layer 1 */
		if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
			ret = mISDN_FsmEvent(&l2->l2m,
				EV_L2_DL_ESTABLISH_REQ, skb);
		break;
	case PH_DEACTIVATE_IND:
		test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
		break;
	case MPH_INFORMATION_IND:
		if (!l2->up)
			break;
		ret = l2->up->send(l2->up, skb);
		break;
	case DL_DATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
		break;
	case DL_UNITDATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
		break;
	case DL_ESTABLISH_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			test_and_set_bit(FLG_ORIG, &l2->flag);
		if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag))
				ret = mISDN_FsmEvent(&l2->l2m,
					EV_L2_DL_ESTABLISH_REQ, skb);
		} else {
			/* layer 1 down: activate it first and remember
			 * the pending establish request */
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag)) {
				test_and_set_bit(FLG_ESTAB_PEND,
				    &l2->flag);
			}
			ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
			    skb);
		}
		break;
	case DL_RELEASE_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
			    l2_newid(l2), 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
		    skb);
		break;
	default:
		if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m, "l2 unknown pr %04x",
			    hh->prim);
	}
	if (ret) {
		dev_kfree_skb(skb);
		ret = 0;
	}
	return ret;
}
/*
 * Entry point for the TEI manager: translate MDL_* management
 * commands into layer-2 FSM events.
 * Returns the FSM event result or -EINVAL for unknown commands.
 */
int
tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
{
	int		ret = -EINVAL;

	if (*debug & DEBUG_L2_TEI)
		printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
	switch (cmd) {
	case (MDL_ASSIGN_REQ):
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
		break;
	case (MDL_REMOVE_REQ):
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
		break;
	case (MDL_ERROR_IND):
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
		break;
	case (MDL_ERROR_RSP):
		/* ETS 300-125 5.3.2.1 Test: TC13010 */
		printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
		break;
	}
	return ret;
}
/*
 * Tear down and free a layer2 instance: stop both timers, flush all
 * queues and the ack window, and for LAPD also release the TEI and
 * close the underlying D-channel. The structure itself is freed.
 */
static void
release_l2(struct layer2 *l2)
{
	mISDN_FsmDelTimer(&l2->t200, 21);
	mISDN_FsmDelTimer(&l2->t203, 16);
	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	skb_queue_purge(&l2->down_queue);
	ReleaseWin(l2);
	if (test_bit(FLG_LAPD, &l2->flag)) {
		TEIrelease(l2);
		if (l2->ch.st)
			l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
			    CLOSE_CHANNEL, NULL);
	}
	kfree(l2);
}
/*
 * mISDNchannel control hook. OPEN_CHANNEL (LAPD only) publishes the
 * SAPI/TEI address and informs layer 3; CLOSE_CHANNEL propagates the
 * close to the peer and destroys this instance — l2 must not be used
 * after that. Always returns 0.
 */
static int
l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	struct layer2		*l2 = container_of(ch, struct layer2, ch);
	u_int			info;

	if (*debug & DEBUG_L2_CTRL)
		printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);

	switch (cmd) {
	case OPEN_CHANNEL:
		if (test_bit(FLG_LAPD, &l2->flag)) {
			set_channel_address(&l2->ch, l2->sapi, l2->tei);
			info = DL_INFO_L2_CONNECT;
			l2up_create(l2, DL_INFORMATION_IND,
			    sizeof(info), &info);
		}
		break;
	case CLOSE_CHANNEL:
		if (l2->ch.peer)
			l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
		release_l2(l2);
		break;
	}
	return 0;
}
/*
 * Allocate and initialize a layer2 instance on top of channel ch.
 * protocol selects the flavour (LAPD NT/TE or X.75 LAPB) and sets
 * the matching flags, window size and Q.921/LAPB timer defaults
 * (T200, N200, T203); for LAPD the underlying S0/E1 D-channel is
 * opened here. Returns the new instance or NULL on failure.
 * The caller owns the result and releases it via CLOSE_CHANNEL.
 */
struct layer2 *
create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
    int sapi)
{
	struct layer2		*l2;
	struct channel_req	rq;

	l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
	if (!l2) {
		printk(KERN_ERR "kzalloc layer2 failed\n");
		return NULL;
	}
	l2->next_id = 1;
	l2->down_id = MISDN_ID_NONE;
	l2->up = ch;
	l2->ch.st = ch->st;
	l2->ch.send = l2_send;
	l2->ch.ctrl = l2_ctrl;
	switch (protocol) {
	case ISDN_P_LAPD_NT:
		/* network side of a LAPD D-channel */
		test_and_set_bit(FLG_LAPD, &l2->flag);
		test_and_set_bit(FLG_LAPD_NET, &l2->flag);
		test_and_set_bit(FLG_MOD128, &l2->flag);
		l2->sapi = sapi;
		l2->maxlen = MAX_DFRAME_LEN;
		if (test_bit(OPTION_L2_PMX, &options))
			l2->window = 7;
		else
			l2->window = 1;
		if (test_bit(OPTION_L2_PTP, &options))
			test_and_set_bit(FLG_PTP, &l2->flag);
		if (test_bit(OPTION_L2_FIXEDTEI, &options))
			test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
		l2->tei = tei;
		l2->T200 = 1000;
		l2->N200 = 3;
		l2->T203 = 10000;
		if (test_bit(OPTION_L2_PMX, &options))
			rq.protocol = ISDN_P_NT_E1;
		else
			rq.protocol = ISDN_P_NT_S0;
		rq.adr.channel = 0;
		l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
		break;
	case ISDN_P_LAPD_TE:
		/* terminal side of a LAPD D-channel; FLG_ORIG marks us
		 * as the establishing side */
		test_and_set_bit(FLG_LAPD, &l2->flag);
		test_and_set_bit(FLG_MOD128, &l2->flag);
		test_and_set_bit(FLG_ORIG, &l2->flag);
		l2->sapi = sapi;
		l2->maxlen = MAX_DFRAME_LEN;
		if (test_bit(OPTION_L2_PMX, &options))
			l2->window = 7;
		else
			l2->window = 1;
		if (test_bit(OPTION_L2_PTP, &options))
			test_and_set_bit(FLG_PTP, &l2->flag);
		if (test_bit(OPTION_L2_FIXEDTEI, &options))
			test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
		l2->tei = tei;
		l2->T200 = 1000;
		l2->N200 = 3;
		l2->T203 = 10000;
		if (test_bit(OPTION_L2_PMX, &options))
			rq.protocol = ISDN_P_TE_E1;
		else
			rq.protocol = ISDN_P_TE_S0;
		rq.adr.channel = 0;
		l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
		break;
	case ISDN_P_B_X75SLP:
		/* X.75 SLP (LAPB) on a B-channel; fixed A/B addresses */
		test_and_set_bit(FLG_LAPB, &l2->flag);
		l2->window = 7;
		l2->maxlen = MAX_DATA_SIZE;
		l2->T200 = 1000;
		l2->N200 = 4;
		l2->T203 = 5000;
		l2->addr.A = 3;
		l2->addr.B = 1;
		break;
	default:
		printk(KERN_ERR "layer2 create failed prt %x\n",
		    protocol);
		kfree(l2);
		return NULL;
	}
	skb_queue_head_init(&l2->i_queue);
	skb_queue_head_init(&l2->ui_queue);
	skb_queue_head_init(&l2->down_queue);
	skb_queue_head_init(&l2->tmp_queue);
	InitWin(l2);
	l2->l2m.fsm = &l2fsm;
	/* point-to-point links start ready (ST_L2_4), point-to-multipoint
	 * must first obtain a TEI (ST_L2_1) */
	if (test_bit(FLG_LAPB, &l2->flag) ||
	    test_bit(FLG_PTP, &l2->flag) ||
	    test_bit(FLG_LAPD_NET, &l2->flag))
		l2->l2m.state = ST_L2_4;
	else
		l2->l2m.state = ST_L2_1;
	l2->l2m.debug = *debug;
	l2->l2m.userdata = l2;
	l2->l2m.userint = 0;
	l2->l2m.printdebug = l2m_debug;

	mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
	mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
	return l2;
}
2173 static int
2174 x75create(struct channel_req *crq)
2176 struct layer2 *l2;
2178 if (crq->protocol != ISDN_P_B_X75SLP)
2179 return -EPROTONOSUPPORT;
2180 l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
2181 if (!l2)
2182 return -ENOMEM;
2183 crq->ch = &l2->ch;
2184 crq->protocol = ISDN_P_B_HDLC;
2185 return 0;
/* B-protocol descriptor registered for X.75 SLP links. */
static struct Bprotocol X75SLP = {
	.Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
	.name = "X75SLP",
	.create = x75create
};
/*
 * Module init for the layer-2 code: store the shared debug mask,
 * register the X.75 B-protocol, build the layer-2 FSM from L2FnList
 * and initialize the TEI manager. Always returns 0.
 */
int
Isdnl2_Init(u_int *deb)
{
	debug = deb;
	mISDN_register_Bprotocol(&X75SLP);
	l2fsm.state_count = L2_STATE_COUNT;
	l2fsm.event_count = L2_EVENT_COUNT;
	l2fsm.strEvent = strL2Event;
	l2fsm.strState = strL2State;
	mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
	TEIInit(deb);
	return 0;
}
/*
 * Module cleanup: undo everything Isdnl2_Init() set up — unregister
 * the X.75 B-protocol, free the TEI manager and the layer-2 FSM.
 */
void
Isdnl2_cleanup(void)
{
	mISDN_unregister_Bprotocol(&X75SLP);
	TEIFree();
	mISDN_FsmFree(&l2fsm);
}