release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/isdn/mISDN/layer2.c
/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mISDNif.h>
#include <linux/slab.h>
#include "core.h"
#include "fsm.h"
#include "layer2.h"
static u_int *debug;

static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};

static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNITDATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNITDATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};
94 static void
95 l2m_debug(struct FsmInst *fi, char *fmt, ...)
97 struct layer2 *l2 = fi->userdata;
98 va_list va;
100 if (!(*debug & DEBUG_L2_FSM))
101 return;
102 va_start(va, fmt);
103 printk(KERN_DEBUG "l2 (sapi %d tei %d): ", l2->sapi, l2->tei);
104 vprintk(fmt, va);
105 printk("\n");
106 va_end(va);
109 inline u_int
110 l2headersize(struct layer2 *l2, int ui)
112 return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
113 (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
116 inline u_int
117 l2addrsize(struct layer2 *l2)
119 return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
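/*
 * Build the id used for messages sent down the stack: a 15 bit
 * sequence counter in the upper 16 bits, the TEI in bits 8-15 and
 * the SAPI in bits 0-7.
 */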
122 static u_int
123 l2_newid(struct layer2 *l2)
125 u_int id;
127 id = l2->next_id++;
128 if (id == 0x7fff)
129 l2->next_id = 1;
130 id <<= 16;
131 id |= l2->tei << 8;
132 id |= l2->sapi;
133 return id;
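/*
 * Deliver an skb to the upper layer (L3): stamp the mISDN header
 * with the primitive and our channel address; free the skb if the
 * upper layer refuses it.
 */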
136 static void
137 l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
139 int err;
141 if (!l2->up)
142 return;
143 mISDN_HEAD_PRIM(skb) = prim;
144 mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
145 err = l2->up->send(l2->up, skb);
146 if (err) {
147 printk(KERN_WARNING "%s: err=%d\n", __func__, err);
148 dev_kfree_skb(skb);
152 static void
153 l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
155 struct sk_buff *skb;
156 struct mISDNhead *hh;
157 int err;
159 if (!l2->up)
160 return;
161 skb = mI_alloc_skb(len, GFP_ATOMIC);
162 if (!skb)
163 return;
164 hh = mISDN_HEAD_P(skb);
165 hh->prim = prim;
166 hh->id = (l2->ch.nr << 16) | l2->ch.addr;
167 if (len)
168 memcpy(skb_put(skb, len), arg, len);
169 err = l2->up->send(l2->up, skb);
170 if (err) {
171 printk(KERN_WARNING "%s: err=%d\n", __func__, err);
172 dev_kfree_skb(skb);
176 static int
177 l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
178 int ret;
180 ret = l2->ch.recv(l2->ch.peer, skb);
181 if (ret && (*debug & DEBUG_L2_RECV))
182 printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
183 return ret;
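/*
 * Send toward L1. Only one PH_DATA_REQ may be outstanding; while
 * FLG_L1_NOTREADY is set further data requests are parked on
 * down_queue until the PH_DATA_CNF for the current one arrives.
 */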
186 static int
187 l2down_raw(struct layer2 *l2, struct sk_buff *skb)
189 struct mISDNhead *hh = mISDN_HEAD_P(skb);
191 if (hh->prim == PH_DATA_REQ) {
192 if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
193 skb_queue_tail(&l2->down_queue, skb);
194 return 0;
196 l2->down_id = mISDN_HEAD_ID(skb);
198 return l2down_skb(l2, skb);
201 static int
202 l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
204 struct mISDNhead *hh = mISDN_HEAD_P(skb);
206 hh->prim = prim;
207 hh->id = id;
208 return l2down_raw(l2, skb);
211 static int
212 l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
214 struct sk_buff *skb;
215 int err;
216 struct mISDNhead *hh;
218 skb = mI_alloc_skb(len, GFP_ATOMIC);
219 if (!skb)
220 return -ENOMEM;
221 hh = mISDN_HEAD_P(skb);
222 hh->prim = prim;
223 hh->id = id;
224 if (len)
225 memcpy(skb_put(skb, len), arg, len);
226 err = l2down_raw(l2, skb);
227 if (err)
228 dev_kfree_skb(skb);
229 return err;
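/*
 * PH_DATA_CNF from L1: the pending frame went out. Push the next
 * frame from down_queue, and once the queue is empty clear
 * FLG_L1_NOTREADY and signal EV_L2_ACK_PULL so waiting I-frames
 * get transmitted.
 */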
232 static int
233 ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
234 struct sk_buff *nskb = skb;
235 int ret = -EAGAIN;
237 if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
238 if (hh->id == l2->down_id) {
239 nskb = skb_dequeue(&l2->down_queue);
240 if (nskb) {
241 l2->down_id = mISDN_HEAD_ID(nskb);
242 if (l2down_skb(l2, nskb)) {
243 dev_kfree_skb(nskb);
244 l2->down_id = MISDN_ID_NONE;
246 } else
247 l2->down_id = MISDN_ID_NONE;
248 if (ret) {
249 dev_kfree_skb(skb);
250 ret = 0;
252 if (l2->down_id == MISDN_ID_NONE) {
253 test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
254 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
258 if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
259 nskb = skb_dequeue(&l2->down_queue);
260 if (nskb) {
261 l2->down_id = mISDN_HEAD_ID(nskb);
262 if (l2down_skb(l2, nskb)) {
263 dev_kfree_skb(nskb);
264 l2->down_id = MISDN_ID_NONE;
265 test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
267 } else
268 test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
270 return ret;
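/*
 * Report an MDL error. For LAPD links with a dynamic TEI the error
 * codes 'C', 'D', 'G' and 'H' are forwarded to the TEI manager.
 */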
273 static int
274 l2mgr(struct layer2 *l2, u_int prim, void *arg) {
275 long c = (long)arg;
277 printk(KERN_WARNING
278 "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
279 if (test_bit(FLG_LAPD, &l2->flag) &&
280 !test_bit(FLG_FIXED_TEI, &l2->flag)) {
281 switch (c) {
282 case 'C':
283 case 'D':
284 case 'G':
285 case 'H':
286 l2_tei(l2, prim, (u_long)arg);
287 break;
290 return 0;
293 static void
294 set_peer_busy(struct layer2 *l2) {
295 test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
296 if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
297 test_and_set_bit(FLG_L2BLOCK, &l2->flag);
300 static void
301 clear_peer_busy(struct layer2 *l2) {
302 if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
303 test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
306 static void
307 InitWin(struct layer2 *l2)
309 int i;
311 for (i = 0; i < MAX_WINDOW; i++)
312 l2->windowar[i] = NULL;
315 static int
316 freewin(struct layer2 *l2)
318 int i, cnt = 0;
320 for (i = 0; i < MAX_WINDOW; i++) {
321 if (l2->windowar[i]) {
322 cnt++;
323 dev_kfree_skb(l2->windowar[i]);
324 l2->windowar[i] = NULL;
327 return cnt;
330 static void
331 ReleaseWin(struct layer2 *l2)
333 int cnt = freewin(l2);
335 if (cnt)
336 printk(KERN_WARNING
337 "isdnl2 freed %d skbuffs in release\n", cnt);
340 inline unsigned int
341 cansend(struct layer2 *l2)
343 unsigned int p1;
345 if (test_bit(FLG_MOD128, &l2->flag))
346 p1 = (l2->vs - l2->va) % 128;
347 else
348 p1 = (l2->vs - l2->va) % 8;
349 return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
352 inline void
353 clear_exception(struct layer2 *l2)
355 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
356 test_and_clear_bit(FLG_REJEXC, &l2->flag);
357 test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
358 clear_peer_busy(l2);
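/*
 * Write the address field into *header: SAPI/TEI octets with the
 * C/R bit for LAPD, the single A/B address octet for LAPB.
 * Returns the number of address octets written.
 */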
361 static int
362 sethdraddr(struct layer2 *l2, u_char *header, int rsp)
364 u_char *ptr = header;
365 int crbit = rsp;
367 if (test_bit(FLG_LAPD, &l2->flag)) {
368 if (test_bit(FLG_LAPD_NET, &l2->flag))
369 crbit = !crbit;
370 *ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
371 *ptr++ = (l2->tei << 1) | 1;
372 return 2;
373 } else {
374 if (test_bit(FLG_ORIG, &l2->flag))
375 crbit = !crbit;
376 if (crbit)
377 *ptr++ = l2->addr.B;
378 else
379 *ptr++ = l2->addr.A;
380 return 1;
384 static inline void
385 enqueue_super(struct layer2 *l2, struct sk_buff *skb)
387 if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
388 dev_kfree_skb(skb);
391 static inline void
392 enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
394 if (l2->tm)
395 l2_tei(l2, MDL_STATUS_UI_IND, 0);
396 if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
397 dev_kfree_skb(skb);
400 inline int
401 IsUI(u_char *data)
403 return (data[0] & 0xef) == UI;
406 inline int
407 IsUA(u_char *data)
409 return (data[0] & 0xef) == UA;
412 inline int
413 IsDM(u_char *data)
415 return (data[0] & 0xef) == DM;
418 inline int
419 IsDISC(u_char *data)
421 return (data[0] & 0xef) == DISC;
424 inline int
425 IsRR(u_char *data, struct layer2 *l2)
427 if (test_bit(FLG_MOD128, &l2->flag))
428 return data[0] == RR;
429 else
430 return (data[0] & 0xf) == 1;
433 inline int
434 IsSFrame(u_char *data, struct layer2 *l2)
436 register u_char d = *data;
438 if (!test_bit(FLG_MOD128, &l2->flag))
439 d &= 0xf;
440 return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
443 inline int
444 IsSABME(u_char *data, struct layer2 *l2)
446 u_char d = data[0] & ~0x10;
448 return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
451 inline int
452 IsREJ(u_char *data, struct layer2 *l2)
454 return test_bit(FLG_MOD128, &l2->flag) ?
455 data[0] == REJ : (data[0] & 0xf) == REJ;
458 inline int
459 IsFRMR(u_char *data)
461 return (data[0] & 0xef) == FRMR;
464 inline int
465 IsRNR(u_char *data, struct layer2 *l2)
467 return test_bit(FLG_MOD128, &l2->flag) ?
468 data[0] == RNR : (data[0] & 0xf) == RNR;
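/*
 * The *_error() helpers below validate a received frame and return
 * 0 or an error code character: 'L' wrong command/response bit,
 * 'N' bad length, 'O' information field too long. The code is
 * reported via EV_L2_FRAME_ERROR.
 */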
471 static int
472 iframe_error(struct layer2 *l2, struct sk_buff *skb)
474 u_int i;
475 int rsp = *skb->data & 0x2;
477 i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
478 if (test_bit(FLG_ORIG, &l2->flag))
479 rsp = !rsp;
480 if (rsp)
481 return 'L';
482 if (skb->len < i)
483 return 'N';
484 if ((skb->len - i) > l2->maxlen)
485 return 'O';
486 return 0;
489 static int
490 super_error(struct layer2 *l2, struct sk_buff *skb)
492 if (skb->len != l2addrsize(l2) +
493 (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
494 return 'N';
495 return 0;
498 static int
499 unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
501 int rsp = (*skb->data & 0x2) >> 1;
502 if (test_bit(FLG_ORIG, &l2->flag))
503 rsp = !rsp;
504 if (rsp != wantrsp)
505 return 'L';
506 if (skb->len != l2addrsize(l2) + 1)
507 return 'N';
508 return 0;
511 static int
512 UI_error(struct layer2 *l2, struct sk_buff *skb)
514 int rsp = *skb->data & 0x2;
515 if (test_bit(FLG_ORIG, &l2->flag))
516 rsp = !rsp;
517 if (rsp)
518 return 'L';
519 if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
520 return 'O';
521 return 0;
524 static int
525 FRMR_error(struct layer2 *l2, struct sk_buff *skb)
527 u_int headers = l2addrsize(l2) + 1;
528 u_char *datap = skb->data + headers;
529 int rsp = *skb->data & 0x2;
531 if (test_bit(FLG_ORIG, &l2->flag))
532 rsp = !rsp;
533 if (!rsp)
534 return 'L';
535 if (test_bit(FLG_MOD128, &l2->flag)) {
536 if (skb->len < headers + 5)
537 return 'N';
538 else if (*debug & DEBUG_L2)
539 l2m_debug(&l2->l2m,
540 "FRMR information %2x %2x %2x %2x %2x",
541 datap[0], datap[1], datap[2], datap[3], datap[4]);
542 } else {
543 if (skb->len < headers + 3)
544 return 'N';
545 else if (*debug & DEBUG_L2)
546 l2m_debug(&l2->l2m,
547 "FRMR information %2x %2x %2x",
548 datap[0], datap[1], datap[2]);
550 return 0;
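/*
 * A received N(R) is valid if it lies between V(A) and V(S),
 * i.e. (nr - va) mod 8/128 <= (vs - va) mod 8/128.
 */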
553 static unsigned int
554 legalnr(struct layer2 *l2, unsigned int nr)
556 if (test_bit(FLG_MOD128, &l2->flag))
557 return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
558 else
559 return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
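/*
 * Advance V(A) up to nr and drop the newly acknowledged I-frames
 * from the retransmission window (windowar).
 */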
562 static void
563 setva(struct layer2 *l2, unsigned int nr)
565 struct sk_buff *skb;
567 while (l2->va != nr) {
568 l2->va++;
569 if (test_bit(FLG_MOD128, &l2->flag))
570 l2->va %= 128;
571 else
572 l2->va %= 8;
573 if (l2->windowar[l2->sow]) {
574 skb_trim(l2->windowar[l2->sow], 0);
575 skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
576 l2->windowar[l2->sow] = NULL;
578 l2->sow = (l2->sow + 1) % l2->window;
580 skb = skb_dequeue(&l2->tmp_queue);
581 while (skb) {
582 dev_kfree_skb(skb);
583 skb = skb_dequeue(&l2->tmp_queue);
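/*
 * Build and queue an unnumbered frame (SABM/SABME, UA, DM, DISC)
 * with the given control byte and command/response value; a fresh
 * skb is allocated if none is passed in.
 */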
587 static void
588 send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
590 u_char tmp[MAX_L2HEADER_LEN];
591 int i;
593 i = sethdraddr(l2, tmp, cr);
594 tmp[i++] = cmd;
595 if (skb)
596 skb_trim(skb, 0);
597 else {
598 skb = mI_alloc_skb(i, GFP_ATOMIC);
599 if (!skb) {
600 printk(KERN_WARNING "%s: can't alloc skbuff\n",
601 __func__);
602 return;
605 memcpy(skb_put(skb, i), tmp, i);
606 enqueue_super(l2, skb);
610 inline u_char
611 get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
613 return skb->data[l2addrsize(l2)] & 0x10;
616 inline u_char
617 get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
619 u_char PF;
621 PF = get_PollFlag(l2, skb);
622 dev_kfree_skb(skb);
623 return PF;
626 inline void
627 start_t200(struct layer2 *l2, int i)
629 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
630 test_and_set_bit(FLG_T200_RUN, &l2->flag);
633 inline void
634 restart_t200(struct layer2 *l2, int i)
636 mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
637 test_and_set_bit(FLG_T200_RUN, &l2->flag);
640 inline void
641 stop_t200(struct layer2 *l2, int i)
643 if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
644 mISDN_FsmDelTimer(&l2->t200, i);
647 inline void
648 st5_dl_release_l2l3(struct layer2 *l2)
650 int pr;
652 if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
653 pr = DL_RELEASE_CNF;
654 else
655 pr = DL_RELEASE_IND;
656 l2up_create(l2, pr, 0, NULL);
659 inline void
660 lapb_dl_release_l2l3(struct layer2 *l2, int f)
662 if (test_bit(FLG_LAPB, &l2->flag))
663 l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
664 l2up_create(l2, f, 0, NULL);
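/*
 * (Re)start link establishment: send SABM/SABME with the P bit set,
 * restart T200, stop T203 and enter ST_L2_5 (awaiting establishment).
 */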
667 static void
668 establishlink(struct FsmInst *fi)
670 struct layer2 *l2 = fi->userdata;
671 u_char cmd;
673 clear_exception(l2);
674 l2->rc = 0;
675 cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
676 send_uframe(l2, NULL, cmd, CMD);
677 mISDN_FsmDelTimer(&l2->t203, 1);
678 restart_t200(l2, 1);
679 test_and_clear_bit(FLG_PEND_REL, &l2->flag);
680 freewin(l2);
681 mISDN_FsmChangeState(fi, ST_L2_5);
684 static void
685 l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
687 struct sk_buff *skb = arg;
688 struct layer2 *l2 = fi->userdata;
690 if (get_PollFlagFree(l2, skb))
691 l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
692 else
693 l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
697 static void
698 l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
700 struct sk_buff *skb = arg;
701 struct layer2 *l2 = fi->userdata;
703 if (get_PollFlagFree(l2, skb))
704 l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
705 else {
706 l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
707 establishlink(fi);
708 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
712 static void
713 l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
715 struct sk_buff *skb = arg;
716 struct layer2 *l2 = fi->userdata;
718 if (get_PollFlagFree(l2, skb))
719 l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
720 else
721 l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
722 establishlink(fi);
723 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
726 static void
727 l2_go_st3(struct FsmInst *fi, int event, void *arg)
729 dev_kfree_skb((struct sk_buff *)arg);
730 mISDN_FsmChangeState(fi, ST_L2_3);
733 static void
734 l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
736 struct layer2 *l2 = fi->userdata;
738 mISDN_FsmChangeState(fi, ST_L2_3);
739 dev_kfree_skb((struct sk_buff *)arg);
740 l2_tei(l2, MDL_ASSIGN_IND, 0);
743 static void
744 l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
746 struct layer2 *l2 = fi->userdata;
747 struct sk_buff *skb = arg;
749 skb_queue_tail(&l2->ui_queue, skb);
750 mISDN_FsmChangeState(fi, ST_L2_2);
751 l2_tei(l2, MDL_ASSIGN_IND, 0);
754 static void
755 l2_queue_ui(struct FsmInst *fi, int event, void *arg)
757 struct layer2 *l2 = fi->userdata;
758 struct sk_buff *skb = arg;
760 skb_queue_tail(&l2->ui_queue, skb);
763 static void
764 tx_ui(struct layer2 *l2)
766 struct sk_buff *skb;
767 u_char header[MAX_L2HEADER_LEN];
768 int i;
770 i = sethdraddr(l2, header, CMD);
771 if (test_bit(FLG_LAPD_NET, &l2->flag))
772 header[1] = 0xff; /* tei 127 */
773 header[i++] = UI;
774 while ((skb = skb_dequeue(&l2->ui_queue))) {
775 memcpy(skb_push(skb, i), header, i);
776 enqueue_ui(l2, skb);
780 static void
781 l2_send_ui(struct FsmInst *fi, int event, void *arg)
783 struct layer2 *l2 = fi->userdata;
784 struct sk_buff *skb = arg;
786 skb_queue_tail(&l2->ui_queue, skb);
787 tx_ui(l2);
790 static void
791 l2_got_ui(struct FsmInst *fi, int event, void *arg)
793 struct layer2 *l2 = fi->userdata;
794 struct sk_buff *skb = arg;
796 skb_pull(skb, l2headersize(l2, 1));
	/* in states 1-3 for broadcast */
801 if (l2->tm)
802 l2_tei(l2, MDL_STATUS_UI_IND, 0);
803 l2up(l2, DL_UNITDATA_IND, skb);
806 static void
807 l2_establish(struct FsmInst *fi, int event, void *arg)
809 struct sk_buff *skb = arg;
810 struct layer2 *l2 = fi->userdata;
812 establishlink(fi);
813 test_and_set_bit(FLG_L3_INIT, &l2->flag);
814 dev_kfree_skb(skb);
817 static void
818 l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
820 struct sk_buff *skb = arg;
821 struct layer2 *l2 = fi->userdata;
823 skb_queue_purge(&l2->i_queue);
824 test_and_set_bit(FLG_L3_INIT, &l2->flag);
825 test_and_clear_bit(FLG_PEND_REL, &l2->flag);
826 dev_kfree_skb(skb);
829 static void
830 l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
832 struct sk_buff *skb = arg;
833 struct layer2 *l2 = fi->userdata;
835 skb_queue_purge(&l2->i_queue);
836 establishlink(fi);
837 test_and_set_bit(FLG_L3_INIT, &l2->flag);
838 dev_kfree_skb(skb);
841 static void
842 l2_release(struct FsmInst *fi, int event, void *arg)
844 struct layer2 *l2 = fi->userdata;
845 struct sk_buff *skb = arg;
847 skb_trim(skb, 0);
848 l2up(l2, DL_RELEASE_CNF, skb);
851 static void
852 l2_pend_rel(struct FsmInst *fi, int event, void *arg)
854 struct sk_buff *skb = arg;
855 struct layer2 *l2 = fi->userdata;
857 test_and_set_bit(FLG_PEND_REL, &l2->flag);
858 dev_kfree_skb(skb);
861 static void
862 l2_disconnect(struct FsmInst *fi, int event, void *arg)
864 struct layer2 *l2 = fi->userdata;
865 struct sk_buff *skb = arg;
867 skb_queue_purge(&l2->i_queue);
868 freewin(l2);
869 mISDN_FsmChangeState(fi, ST_L2_6);
870 l2->rc = 0;
871 send_uframe(l2, NULL, DISC | 0x10, CMD);
872 mISDN_FsmDelTimer(&l2->t203, 1);
873 restart_t200(l2, 2);
874 if (skb)
875 dev_kfree_skb(skb);
878 static void
879 l2_start_multi(struct FsmInst *fi, int event, void *arg)
881 struct layer2 *l2 = fi->userdata;
882 struct sk_buff *skb = arg;
884 l2->vs = 0;
885 l2->va = 0;
886 l2->vr = 0;
887 l2->sow = 0;
888 clear_exception(l2);
889 send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
890 mISDN_FsmChangeState(fi, ST_L2_7);
891 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
892 skb_trim(skb, 0);
893 l2up(l2, DL_ESTABLISH_IND, skb);
894 if (l2->tm)
895 l2_tei(l2, MDL_STATUS_UP_IND, 0);
898 static void
899 l2_send_UA(struct FsmInst *fi, int event, void *arg)
901 struct layer2 *l2 = fi->userdata;
902 struct sk_buff *skb = arg;
904 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
907 static void
908 l2_send_DM(struct FsmInst *fi, int event, void *arg)
910 struct layer2 *l2 = fi->userdata;
911 struct sk_buff *skb = arg;
913 send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
916 static void
917 l2_restart_multi(struct FsmInst *fi, int event, void *arg)
919 struct layer2 *l2 = fi->userdata;
920 struct sk_buff *skb = arg;
921 int est = 0;
923 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
925 l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
927 if (l2->vs != l2->va) {
928 skb_queue_purge(&l2->i_queue);
929 est = 1;
932 clear_exception(l2);
933 l2->vs = 0;
934 l2->va = 0;
935 l2->vr = 0;
936 l2->sow = 0;
937 mISDN_FsmChangeState(fi, ST_L2_7);
938 stop_t200(l2, 3);
939 mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
941 if (est)
942 l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *		MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
 *		0, NULL, 0);
 */
947 if (skb_queue_len(&l2->i_queue) && cansend(l2))
948 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
951 static void
952 l2_stop_multi(struct FsmInst *fi, int event, void *arg)
954 struct layer2 *l2 = fi->userdata;
955 struct sk_buff *skb = arg;
957 mISDN_FsmChangeState(fi, ST_L2_4);
958 mISDN_FsmDelTimer(&l2->t203, 3);
959 stop_t200(l2, 4);
961 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
962 skb_queue_purge(&l2->i_queue);
963 freewin(l2);
964 lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
965 if (l2->tm)
966 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
969 static void
970 l2_connected(struct FsmInst *fi, int event, void *arg)
972 struct layer2 *l2 = fi->userdata;
973 struct sk_buff *skb = arg;
974 int pr = -1;
976 if (!get_PollFlag(l2, skb)) {
977 l2_mdl_error_ua(fi, event, arg);
978 return;
980 dev_kfree_skb(skb);
981 if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
982 l2_disconnect(fi, event, NULL);
983 if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
984 pr = DL_ESTABLISH_CNF;
985 } else if (l2->vs != l2->va) {
986 skb_queue_purge(&l2->i_queue);
987 pr = DL_ESTABLISH_IND;
989 stop_t200(l2, 5);
990 l2->vr = 0;
991 l2->vs = 0;
992 l2->va = 0;
993 l2->sow = 0;
994 mISDN_FsmChangeState(fi, ST_L2_7);
995 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
996 if (pr != -1)
997 l2up_create(l2, pr, 0, NULL);
999 if (skb_queue_len(&l2->i_queue) && cansend(l2))
1000 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1002 if (l2->tm)
1003 l2_tei(l2, MDL_STATUS_UP_IND, 0);
1006 static void
1007 l2_released(struct FsmInst *fi, int event, void *arg)
1009 struct layer2 *l2 = fi->userdata;
1010 struct sk_buff *skb = arg;
1012 if (!get_PollFlag(l2, skb)) {
1013 l2_mdl_error_ua(fi, event, arg);
1014 return;
1016 dev_kfree_skb(skb);
1017 stop_t200(l2, 6);
1018 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1019 mISDN_FsmChangeState(fi, ST_L2_4);
1020 if (l2->tm)
1021 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1024 static void
1025 l2_reestablish(struct FsmInst *fi, int event, void *arg)
1027 struct layer2 *l2 = fi->userdata;
1028 struct sk_buff *skb = arg;
1030 if (!get_PollFlagFree(l2, skb)) {
1031 establishlink(fi);
1032 test_and_set_bit(FLG_L3_INIT, &l2->flag);
1036 static void
1037 l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
1039 struct layer2 *l2 = fi->userdata;
1040 struct sk_buff *skb = arg;
1042 if (get_PollFlagFree(l2, skb)) {
1043 stop_t200(l2, 7);
1044 if (!test_bit(FLG_L3_INIT, &l2->flag))
1045 skb_queue_purge(&l2->i_queue);
1046 if (test_bit(FLG_LAPB, &l2->flag))
1047 l2down_create(l2, PH_DEACTIVATE_REQ,
1048 l2_newid(l2), 0, NULL);
1049 st5_dl_release_l2l3(l2);
1050 mISDN_FsmChangeState(fi, ST_L2_4);
1051 if (l2->tm)
1052 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1056 static void
1057 l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
1059 struct layer2 *l2 = fi->userdata;
1060 struct sk_buff *skb = arg;
1062 if (get_PollFlagFree(l2, skb)) {
1063 stop_t200(l2, 8);
1064 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1065 mISDN_FsmChangeState(fi, ST_L2_4);
1066 if (l2->tm)
1067 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
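/*
 * Send a supervisory frame (RR, RNR or REJ) carrying the current
 * V(R) and the requested P/F bit.
 */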
1071 static void
1072 enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
1074 struct sk_buff *skb;
1075 u_char tmp[MAX_L2HEADER_LEN];
1076 int i;
1078 i = sethdraddr(l2, tmp, cr);
1079 if (test_bit(FLG_MOD128, &l2->flag)) {
1080 tmp[i++] = typ;
1081 tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
1082 } else
1083 tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
1084 skb = mI_alloc_skb(i, GFP_ATOMIC);
1085 if (!skb) {
1086 printk(KERN_WARNING
1087 "isdnl2 can't alloc sbbuff for enquiry_cr\n");
1088 return;
1090 memcpy(skb_put(skb, i), tmp, i);
1091 enqueue_super(l2, skb);
1094 inline void
1095 enquiry_response(struct layer2 *l2)
1097 if (test_bit(FLG_OWN_BUSY, &l2->flag))
1098 enquiry_cr(l2, RNR, RSP, 1);
1099 else
1100 enquiry_cr(l2, RR, RSP, 1);
1101 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1104 inline void
1105 transmit_enquiry(struct layer2 *l2)
1107 if (test_bit(FLG_OWN_BUSY, &l2->flag))
1108 enquiry_cr(l2, RNR, CMD, 1);
1109 else
1110 enquiry_cr(l2, RR, CMD, 1);
1111 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1112 start_t200(l2, 9);
1116 static void
1117 nrerrorrecovery(struct FsmInst *fi)
1119 struct layer2 *l2 = fi->userdata;
1121 l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
1122 establishlink(fi);
1123 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
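/*
 * Go-back-N recovery: wind V(S) back to nr, move the unacknowledged
 * frames from the ack window back onto i_queue and trigger
 * EV_L2_ACK_PULL to resend them.
 */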
1126 static void
1127 invoke_retransmission(struct layer2 *l2, unsigned int nr)
1129 u_int p1;
1131 if (l2->vs != nr) {
1132 while (l2->vs != nr) {
1133 (l2->vs)--;
1134 if (test_bit(FLG_MOD128, &l2->flag)) {
1135 l2->vs %= 128;
1136 p1 = (l2->vs - l2->va) % 128;
1137 } else {
1138 l2->vs %= 8;
1139 p1 = (l2->vs - l2->va) % 8;
1141 p1 = (p1 + l2->sow) % l2->window;
1142 if (l2->windowar[p1])
1143 skb_queue_head(&l2->i_queue, l2->windowar[p1]);
1144 else
1145 printk(KERN_WARNING
1146 "%s: windowar[%d] is NULL\n",
1147 __func__, p1);
1148 l2->windowar[p1] = NULL;
1150 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
1154 static void
1155 l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
1157 struct layer2 *l2 = fi->userdata;
1158 struct sk_buff *skb = arg;
1159 int PollFlag, rsp, typ = RR;
1160 unsigned int nr;
1162 rsp = *skb->data & 0x2;
1163 if (test_bit(FLG_ORIG, &l2->flag))
1164 rsp = !rsp;
1166 skb_pull(skb, l2addrsize(l2));
1167 if (IsRNR(skb->data, l2)) {
1168 set_peer_busy(l2);
1169 typ = RNR;
1170 } else
1171 clear_peer_busy(l2);
1172 if (IsREJ(skb->data, l2))
1173 typ = REJ;
1175 if (test_bit(FLG_MOD128, &l2->flag)) {
1176 PollFlag = (skb->data[1] & 0x1) == 0x1;
1177 nr = skb->data[1] >> 1;
1178 } else {
1179 PollFlag = (skb->data[0] & 0x10);
1180 nr = (skb->data[0] >> 5) & 0x7;
1182 dev_kfree_skb(skb);
1184 if (PollFlag) {
1185 if (rsp)
1186 l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
1187 else
1188 enquiry_response(l2);
1190 if (legalnr(l2, nr)) {
1191 if (typ == REJ) {
1192 setva(l2, nr);
1193 invoke_retransmission(l2, nr);
1194 stop_t200(l2, 10);
1195 if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
1196 EV_L2_T203, NULL, 6))
1197 l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
1198 } else if ((nr == l2->vs) && (typ == RR)) {
1199 setva(l2, nr);
1200 stop_t200(l2, 11);
1201 mISDN_FsmRestartTimer(&l2->t203, l2->T203,
1202 EV_L2_T203, NULL, 7);
1203 } else if ((l2->va != nr) || (typ == RNR)) {
1204 setva(l2, nr);
1205 if (typ != RR)
1206 mISDN_FsmDelTimer(&l2->t203, 9);
1207 restart_t200(l2, 12);
1209 if (skb_queue_len(&l2->i_queue) && (typ == RR))
1210 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1211 } else
1212 nrerrorrecovery(fi);
1215 static void
1216 l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
1218 struct layer2 *l2 = fi->userdata;
1219 struct sk_buff *skb = arg;
1221 if (!test_bit(FLG_L3_INIT, &l2->flag))
1222 skb_queue_tail(&l2->i_queue, skb);
1223 else
1224 dev_kfree_skb(skb);
1227 static void
1228 l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
1230 struct layer2 *l2 = fi->userdata;
1231 struct sk_buff *skb = arg;
1233 skb_queue_tail(&l2->i_queue, skb);
1234 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1237 static void
1238 l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
1240 struct layer2 *l2 = fi->userdata;
1241 struct sk_buff *skb = arg;
1243 skb_queue_tail(&l2->i_queue, skb);
1246 static void
1247 l2_got_iframe(struct FsmInst *fi, int event, void *arg)
1249 struct layer2 *l2 = fi->userdata;
1250 struct sk_buff *skb = arg;
1251 int PollFlag, i;
1252 u_int ns, nr;
1254 i = l2addrsize(l2);
1255 if (test_bit(FLG_MOD128, &l2->flag)) {
1256 PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
1257 ns = skb->data[i] >> 1;
1258 nr = (skb->data[i + 1] >> 1) & 0x7f;
1259 } else {
1260 PollFlag = (skb->data[i] & 0x10);
1261 ns = (skb->data[i] >> 1) & 0x7;
1262 nr = (skb->data[i] >> 5) & 0x7;
1264 if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
1265 dev_kfree_skb(skb);
1266 if (PollFlag)
1267 enquiry_response(l2);
1268 } else {
1269 if (l2->vr == ns) {
1270 l2->vr++;
1271 if (test_bit(FLG_MOD128, &l2->flag))
1272 l2->vr %= 128;
1273 else
1274 l2->vr %= 8;
1275 test_and_clear_bit(FLG_REJEXC, &l2->flag);
1276 if (PollFlag)
1277 enquiry_response(l2);
1278 else
1279 test_and_set_bit(FLG_ACK_PEND, &l2->flag);
1280 skb_pull(skb, l2headersize(l2, 0));
1281 l2up(l2, DL_DATA_IND, skb);
1282 } else {
1283 /* n(s)!=v(r) */
1284 dev_kfree_skb(skb);
1285 if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
1286 if (PollFlag)
1287 enquiry_response(l2);
1288 } else {
1289 enquiry_cr(l2, REJ, RSP, PollFlag);
1290 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1294 if (legalnr(l2, nr)) {
1295 if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
1296 (fi->state == ST_L2_7)) {
1297 if (nr == l2->vs) {
1298 stop_t200(l2, 13);
1299 mISDN_FsmRestartTimer(&l2->t203, l2->T203,
1300 EV_L2_T203, NULL, 7);
1301 } else if (nr != l2->va)
1302 restart_t200(l2, 14);
1304 setva(l2, nr);
1305 } else {
1306 nrerrorrecovery(fi);
1307 return;
1309 if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
1310 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1311 if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
1312 enquiry_cr(l2, RR, RSP, 0);
1315 static void
1316 l2_got_tei(struct FsmInst *fi, int event, void *arg)
1318 struct layer2 *l2 = fi->userdata;
1319 u_int info;
1321 l2->tei = (signed char)(long)arg;
1322 set_channel_address(&l2->ch, l2->sapi, l2->tei);
1323 info = DL_INFO_L2_CONNECT;
1324 l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
1325 if (fi->state == ST_L2_3) {
1326 establishlink(fi);
1327 test_and_set_bit(FLG_L3_INIT, &l2->flag);
1328 } else
1329 mISDN_FsmChangeState(fi, ST_L2_4);
1330 if (skb_queue_len(&l2->ui_queue))
1331 tx_ui(l2);
1334 static void
1335 l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
1337 struct layer2 *l2 = fi->userdata;
1339 if (test_bit(FLG_LAPD, &l2->flag) &&
1340 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1341 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1342 } else if (l2->rc == l2->N200) {
1343 mISDN_FsmChangeState(fi, ST_L2_4);
1344 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1345 skb_queue_purge(&l2->i_queue);
1346 l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
1347 if (test_bit(FLG_LAPB, &l2->flag))
1348 l2down_create(l2, PH_DEACTIVATE_REQ,
1349 l2_newid(l2), 0, NULL);
1350 st5_dl_release_l2l3(l2);
1351 if (l2->tm)
1352 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1353 } else {
1354 l2->rc++;
1355 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1356 send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
1357 SABME : SABM) | 0x10, CMD);
1361 static void
1362 l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
1364 struct layer2 *l2 = fi->userdata;
1366 if (test_bit(FLG_LAPD, &l2->flag) &&
1367 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1368 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1369 } else if (l2->rc == l2->N200) {
1370 mISDN_FsmChangeState(fi, ST_L2_4);
1371 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1372 l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
1373 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1374 if (l2->tm)
1375 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1376 } else {
1377 l2->rc++;
1378 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
1379 NULL, 9);
1380 send_uframe(l2, NULL, DISC | 0x10, CMD);
1384 static void
1385 l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
1387 struct layer2 *l2 = fi->userdata;
1389 if (test_bit(FLG_LAPD, &l2->flag) &&
1390 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1391 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1392 return;
1394 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1395 l2->rc = 0;
1396 mISDN_FsmChangeState(fi, ST_L2_8);
1397 transmit_enquiry(l2);
1398 l2->rc++;
1401 static void
1402 l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
1404 struct layer2 *l2 = fi->userdata;
1406 if (test_bit(FLG_LAPD, &l2->flag) &&
1407 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1408 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1409 return;
1411 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1412 if (l2->rc == l2->N200) {
1413 l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
1414 establishlink(fi);
1415 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1416 } else {
1417 transmit_enquiry(l2);
1418 l2->rc++;
1422 static void
1423 l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
1425 struct layer2 *l2 = fi->userdata;
1427 if (test_bit(FLG_LAPD, &l2->flag) &&
1428 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1429 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
1430 return;
1432 mISDN_FsmChangeState(fi, ST_L2_8);
1433 transmit_enquiry(l2);
1434 l2->rc = 0;
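/*
 * Transmit the next I-frame from i_queue: keep the original skb in
 * the ack window for possible retransmission, prepend the I header
 * with the current V(S)/V(R) to a clone, send it down and make sure
 * T200 (instead of T203) is running.
 */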
1437 static void
1438 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1440 struct layer2 *l2 = fi->userdata;
1441 struct sk_buff *skb, *nskb, *oskb;
1442 u_char header[MAX_L2HEADER_LEN];
1443 u_int i, p1;
1445 if (!cansend(l2))
1446 return;
1448 skb = skb_dequeue(&l2->i_queue);
1449 if (!skb)
1450 return;
1452 if (test_bit(FLG_MOD128, &l2->flag))
1453 p1 = (l2->vs - l2->va) % 128;
1454 else
1455 p1 = (l2->vs - l2->va) % 8;
1456 p1 = (p1 + l2->sow) % l2->window;
1457 if (l2->windowar[p1]) {
		printk(KERN_WARNING "isdnl2 try to overwrite ack queue entry %d\n",
		    p1);
1460 dev_kfree_skb(l2->windowar[p1]);
1462 l2->windowar[p1] = skb;
1463 i = sethdraddr(l2, header, CMD);
1464 if (test_bit(FLG_MOD128, &l2->flag)) {
1465 header[i++] = l2->vs << 1;
1466 header[i++] = l2->vr << 1;
1467 l2->vs = (l2->vs + 1) % 128;
1468 } else {
1469 header[i++] = (l2->vr << 5) | (l2->vs << 1);
1470 l2->vs = (l2->vs + 1) % 8;
1473 nskb = skb_clone(skb, GFP_ATOMIC);
1474 p1 = skb_headroom(nskb);
1475 if (p1 >= i)
1476 memcpy(skb_push(nskb, i), header, i);
1477 else {
1478 printk(KERN_WARNING
1479 "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
1480 oskb = nskb;
1481 nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
1482 if (!nskb) {
1483 dev_kfree_skb(oskb);
1484 printk(KERN_WARNING "%s: no skb mem\n", __func__);
1485 return;
1487 memcpy(skb_put(nskb, i), header, i);
1488 memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
1489 dev_kfree_skb(oskb);
1491 l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
1492 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1493 if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
1494 mISDN_FsmDelTimer(&l2->t203, 13);
1495 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
1499 static void
1500 l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
1502 struct layer2 *l2 = fi->userdata;
1503 struct sk_buff *skb = arg;
1504 int PollFlag, rsp, rnr = 0;
1505 unsigned int nr;
1507 rsp = *skb->data & 0x2;
1508 if (test_bit(FLG_ORIG, &l2->flag))
1509 rsp = !rsp;
1511 skb_pull(skb, l2addrsize(l2));
1513 if (IsRNR(skb->data, l2)) {
1514 set_peer_busy(l2);
1515 rnr = 1;
1516 } else
1517 clear_peer_busy(l2);
1519 if (test_bit(FLG_MOD128, &l2->flag)) {
1520 PollFlag = (skb->data[1] & 0x1) == 0x1;
1521 nr = skb->data[1] >> 1;
1522 } else {
1523 PollFlag = (skb->data[0] & 0x10);
1524 nr = (skb->data[0] >> 5) & 0x7;
1526 dev_kfree_skb(skb);
1527 if (rsp && PollFlag) {
1528 if (legalnr(l2, nr)) {
1529 if (rnr) {
1530 restart_t200(l2, 15);
1531 } else {
1532 stop_t200(l2, 16);
1533 mISDN_FsmAddTimer(&l2->t203, l2->T203,
1534 EV_L2_T203, NULL, 5);
1535 setva(l2, nr);
1537 invoke_retransmission(l2, nr);
1538 mISDN_FsmChangeState(fi, ST_L2_7);
1539 if (skb_queue_len(&l2->i_queue) && cansend(l2))
1540 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1541 } else
1542 nrerrorrecovery(fi);
1543 } else {
1544 if (!rsp && PollFlag)
1545 enquiry_response(l2);
1546 if (legalnr(l2, nr))
1547 setva(l2, nr);
1548 else
1549 nrerrorrecovery(fi);
1553 static void
1554 l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
1556 struct layer2 *l2 = fi->userdata;
1557 struct sk_buff *skb = arg;
1559 skb_pull(skb, l2addrsize(l2) + 1);
1561 if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
1562 (IsUA(skb->data) && (fi->state == ST_L2_7))) {
1563 l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
1564 establishlink(fi);
1565 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1567 dev_kfree_skb(skb);
1570 static void
1571 l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
1573 struct layer2 *l2 = fi->userdata;
1575 skb_queue_purge(&l2->ui_queue);
1576 l2->tei = GROUP_TEI;
1577 mISDN_FsmChangeState(fi, ST_L2_1);
1580 static void
1581 l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
1583 struct layer2 *l2 = fi->userdata;
1585 skb_queue_purge(&l2->ui_queue);
1586 l2->tei = GROUP_TEI;
1587 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1588 mISDN_FsmChangeState(fi, ST_L2_1);
1591 static void
1592 l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
1594 struct layer2 *l2 = fi->userdata;
1596 skb_queue_purge(&l2->i_queue);
1597 skb_queue_purge(&l2->ui_queue);
1598 freewin(l2);
1599 l2->tei = GROUP_TEI;
1600 stop_t200(l2, 17);
1601 st5_dl_release_l2l3(l2);
1602 mISDN_FsmChangeState(fi, ST_L2_1);
1605 static void
1606 l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
1608 struct layer2 *l2 = fi->userdata;
1610 skb_queue_purge(&l2->ui_queue);
1611 l2->tei = GROUP_TEI;
1612 stop_t200(l2, 18);
1613 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1614 mISDN_FsmChangeState(fi, ST_L2_1);
1617 static void
1618 l2_tei_remove(struct FsmInst *fi, int event, void *arg)
1620 struct layer2 *l2 = fi->userdata;
1622 skb_queue_purge(&l2->i_queue);
1623 skb_queue_purge(&l2->ui_queue);
1624 freewin(l2);
1625 l2->tei = GROUP_TEI;
1626 stop_t200(l2, 17);
1627 mISDN_FsmDelTimer(&l2->t203, 19);
1628 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *		MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
 *		0, NULL, 0);
 */
1633 mISDN_FsmChangeState(fi, ST_L2_1);
1636 static void
1637 l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg)
1639 struct layer2 *l2 = fi->userdata;
1640 struct sk_buff *skb = arg;
1642 skb_queue_purge(&l2->i_queue);
1643 skb_queue_purge(&l2->ui_queue);
1644 if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
1645 l2up(l2, DL_RELEASE_IND, skb);
1646 else
1647 dev_kfree_skb(skb);
1650 static void
1651 l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg)
1653 struct layer2 *l2 = fi->userdata;
1654 struct sk_buff *skb = arg;
1656 skb_queue_purge(&l2->i_queue);
1657 skb_queue_purge(&l2->ui_queue);
1658 freewin(l2);
1659 stop_t200(l2, 19);
1660 st5_dl_release_l2l3(l2);
1661 mISDN_FsmChangeState(fi, ST_L2_4);
1662 if (l2->tm)
1663 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1664 dev_kfree_skb(skb);
1667 static void
1668 l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg)
1670 struct layer2 *l2 = fi->userdata;
1671 struct sk_buff *skb = arg;
1673 skb_queue_purge(&l2->ui_queue);
1674 stop_t200(l2, 20);
1675 l2up(l2, DL_RELEASE_CNF, skb);
1676 mISDN_FsmChangeState(fi, ST_L2_4);
1677 if (l2->tm)
1678 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1681 static void
1682 l2_persistant_da(struct FsmInst *fi, int event, void *arg)
1684 struct layer2 *l2 = fi->userdata;
1685 struct sk_buff *skb = arg;
1687 skb_queue_purge(&l2->i_queue);
1688 skb_queue_purge(&l2->ui_queue);
1689 freewin(l2);
1690 stop_t200(l2, 19);
1691 mISDN_FsmDelTimer(&l2->t203, 19);
1692 l2up(l2, DL_RELEASE_IND, skb);
1693 mISDN_FsmChangeState(fi, ST_L2_4);
1694 if (l2->tm)
1695 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1698 static void
1699 l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
1701 struct layer2 *l2 = fi->userdata;
1702 struct sk_buff *skb = arg;
1704 if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
1705 enquiry_cr(l2, RNR, RSP, 0);
1706 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1708 if (skb)
1709 dev_kfree_skb(skb);
1712 static void
1713 l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
1715 struct layer2 *l2 = fi->userdata;
1716 struct sk_buff *skb = arg;
1718 if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
1719 enquiry_cr(l2, RR, RSP, 0);
1720 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1722 if (skb)
1723 dev_kfree_skb(skb);
1726 static void
1727 l2_frame_error(struct FsmInst *fi, int event, void *arg)
1729 struct layer2 *l2 = fi->userdata;
1731 l2mgr(l2, MDL_ERROR_IND, arg);
1734 static void
1735 l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
1737 struct layer2 *l2 = fi->userdata;
1739 l2mgr(l2, MDL_ERROR_IND, arg);
1740 establishlink(fi);
1741 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
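/*
 * Event/state transition table of the layer 2 FSM; the states
 * ST_L2_1..ST_L2_8 correspond to the Q.921 data link states
 * 1 (TEI unassigned) to 8 (timer recovery).
 */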
static struct FsmNode L2FnList[] =
{
1746 {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
1747 {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
1748 {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
1749 {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
1750 {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
1751 {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
1752 {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
1753 {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
1754 {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
1755 {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
1756 {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
1757 {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
1758 {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
1759 {ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
1760 {ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
1761 {ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
1762 {ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
1763 {ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
1764 {ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
1765 {ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
1766 {ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
1767 {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
1768 {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
1769 {ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
1770 {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
1771 {ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
1772 {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
1773 {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
1774 {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
1775 {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
1776 {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
1777 {ST_L2_4, EV_L2_SABME, l2_start_multi},
1778 {ST_L2_5, EV_L2_SABME, l2_send_UA},
1779 {ST_L2_6, EV_L2_SABME, l2_send_DM},
1780 {ST_L2_7, EV_L2_SABME, l2_restart_multi},
1781 {ST_L2_8, EV_L2_SABME, l2_restart_multi},
1782 {ST_L2_4, EV_L2_DISC, l2_send_DM},
1783 {ST_L2_5, EV_L2_DISC, l2_send_DM},
1784 {ST_L2_6, EV_L2_DISC, l2_send_UA},
1785 {ST_L2_7, EV_L2_DISC, l2_stop_multi},
1786 {ST_L2_8, EV_L2_DISC, l2_stop_multi},
1787 {ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
1788 {ST_L2_5, EV_L2_UA, l2_connected},
1789 {ST_L2_6, EV_L2_UA, l2_released},
1790 {ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
1791 {ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
1792 {ST_L2_4, EV_L2_DM, l2_reestablish},
1793 {ST_L2_5, EV_L2_DM, l2_st5_dm_release},
1794 {ST_L2_6, EV_L2_DM, l2_st6_dm_release},
1795 {ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
1796 {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
1797 {ST_L2_1, EV_L2_UI, l2_got_ui},
1798 {ST_L2_2, EV_L2_UI, l2_got_ui},
1799 {ST_L2_3, EV_L2_UI, l2_got_ui},
1800 {ST_L2_4, EV_L2_UI, l2_got_ui},
1801 {ST_L2_5, EV_L2_UI, l2_got_ui},
1802 {ST_L2_6, EV_L2_UI, l2_got_ui},
1803 {ST_L2_7, EV_L2_UI, l2_got_ui},
1804 {ST_L2_8, EV_L2_UI, l2_got_ui},
1805 {ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
1806 {ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
1807 {ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
1808 {ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
1809 {ST_L2_7, EV_L2_I, l2_got_iframe},
1810 {ST_L2_8, EV_L2_I, l2_got_iframe},
1811 {ST_L2_5, EV_L2_T200, l2_st5_tout_200},
1812 {ST_L2_6, EV_L2_T200, l2_st6_tout_200},
1813 {ST_L2_7, EV_L2_T200, l2_st7_tout_200},
1814 {ST_L2_8, EV_L2_T200, l2_st8_tout_200},
1815 {ST_L2_7, EV_L2_T203, l2_st7_tout_203},
1816 {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
1817 {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1818 {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1819 {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
1820 {ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
1821 {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
1822 {ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
1823 {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
1824 {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1825 {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1826 {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da},
1827 {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
1828 {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
1829 {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da},
1830 {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da},
1831 {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da},
1832 {ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da},
};
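/*
 * Parse a received frame: check the address field (SAPI/TEI for
 * LAPD), classify the control field (I, S, UI, SABME, UA, DISC, DM,
 * FRMR) and feed the matching event into the FSM; malformed frames
 * are reported as EV_L2_FRAME_ERROR.
 */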
1836 static int
1837 ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1839 u_char *datap = skb->data;
1840 int ret = -EINVAL;
1841 int psapi, ptei;
1842 u_int l;
1843 int c = 0;
1845 l = l2addrsize(l2);
1846 if (skb->len <= l) {
1847 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
1848 return ret;
1850 if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
1851 psapi = *datap++;
1852 ptei = *datap++;
1853 if ((psapi & 1) || !(ptei & 1)) {
1854 printk(KERN_WARNING
1855 "l2 D-channel frame wrong EA0/EA1\n");
1856 return ret;
1858 psapi >>= 2;
1859 ptei >>= 1;
1860 if (psapi != l2->sapi) {
			/* not our business */
1862 if (*debug & DEBUG_L2)
1863 printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
1864 __func__, psapi, l2->sapi);
1865 dev_kfree_skb(skb);
1866 return 0;
1868 if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
			/* not our business */
1870 if (*debug & DEBUG_L2)
1871 printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
1872 __func__, ptei, l2->tei);
1873 dev_kfree_skb(skb);
1874 return 0;
1876 } else
1877 datap += l;
1878 if (!(*datap & 1)) { /* I-Frame */
1879 c = iframe_error(l2, skb);
1880 if (!c)
1881 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
1882 } else if (IsSFrame(datap, l2)) { /* S-Frame */
1883 c = super_error(l2, skb);
1884 if (!c)
1885 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
1886 } else if (IsUI(datap)) {
1887 c = UI_error(l2, skb);
1888 if (!c)
1889 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
1890 } else if (IsSABME(datap, l2)) {
1891 c = unnum_error(l2, skb, CMD);
1892 if (!c)
1893 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
1894 } else if (IsUA(datap)) {
1895 c = unnum_error(l2, skb, RSP);
1896 if (!c)
1897 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
1898 } else if (IsDISC(datap)) {
1899 c = unnum_error(l2, skb, CMD);
1900 if (!c)
1901 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
1902 } else if (IsDM(datap)) {
1903 c = unnum_error(l2, skb, RSP);
1904 if (!c)
1905 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
1906 } else if (IsFRMR(datap)) {
1907 c = FRMR_error(l2, skb);
1908 if (!c)
1909 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
1910 } else
1911 c = 'L';
1912 if (c) {
1913 printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
1914 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
1916 return ret;
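/*
 * mISDN channel send entry point: translate PH_* indications from
 * L1 and DL_* requests from L3 into FSM events; anything the FSM
 * does not consume is freed here.
 */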
1919 static int
1920 l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
1922 struct layer2 *l2 = container_of(ch, struct layer2, ch);
1923 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1924 int ret = -EINVAL;
1926 if (*debug & DEBUG_L2_RECV)
1927 printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n",
1928 __func__, hh->prim, hh->id, l2->sapi, l2->tei);
1929 switch (hh->prim) {
1930 case PH_DATA_IND:
1931 ret = ph_data_indication(l2, hh, skb);
1932 break;
1933 case PH_DATA_CNF:
1934 ret = ph_data_confirm(l2, hh, skb);
1935 break;
1936 case PH_ACTIVATE_IND:
1937 test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
1938 l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
1939 if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
1940 ret = mISDN_FsmEvent(&l2->l2m,
1941 EV_L2_DL_ESTABLISH_REQ, skb);
1942 break;
1943 case PH_DEACTIVATE_IND:
1944 test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
1945 l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
1946 ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
1947 break;
1948 case MPH_INFORMATION_IND:
1949 if (!l2->up)
1950 break;
1951 ret = l2->up->send(l2->up, skb);
1952 break;
1953 case DL_DATA_REQ:
1954 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
1955 break;
1956 case DL_UNITDATA_REQ:
1957 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
1958 break;
1959 case DL_ESTABLISH_REQ:
1960 if (test_bit(FLG_LAPB, &l2->flag))
1961 test_and_set_bit(FLG_ORIG, &l2->flag);
1962 if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
1963 if (test_bit(FLG_LAPD, &l2->flag) ||
1964 test_bit(FLG_ORIG, &l2->flag))
1965 ret = mISDN_FsmEvent(&l2->l2m,
1966 EV_L2_DL_ESTABLISH_REQ, skb);
1967 } else {
1968 if (test_bit(FLG_LAPD, &l2->flag) ||
1969 test_bit(FLG_ORIG, &l2->flag)) {
1970 test_and_set_bit(FLG_ESTAB_PEND,
1971 &l2->flag);
1973 ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
1974 skb);
1976 break;
1977 case DL_RELEASE_REQ:
1978 if (test_bit(FLG_LAPB, &l2->flag))
1979 l2down_create(l2, PH_DEACTIVATE_REQ,
1980 l2_newid(l2), 0, NULL);
1981 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
1982 skb);
1983 break;
1984 default:
1985 if (*debug & DEBUG_L2)
1986 l2m_debug(&l2->l2m, "l2 unknown pr %04x",
1987 hh->prim);
1989 if (ret) {
1990 dev_kfree_skb(skb);
1991 ret = 0;
1993 return ret;
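/*
 * Entry point for the TEI manager: map MDL_* primitives onto L2 FSM
 * events.
 */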
int
tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
1999 int ret = -EINVAL;
2001 if (*debug & DEBUG_L2_TEI)
2002 printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
2003 switch (cmd) {
2004 case (MDL_ASSIGN_REQ):
2005 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
2006 break;
2007 case (MDL_REMOVE_REQ):
2008 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
2009 break;
2010 case (MDL_ERROR_IND):
2011 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2012 break;
2013 case (MDL_ERROR_RSP):
2014 /* ETS 300-125 5.3.2.1 Test: TC13010 */
2015 printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
2016 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2017 break;
2019 return ret;
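/*
 * Tear down a layer2 instance: stop the timers, flush all queues
 * and the ack window, release the TEI and close the D-channel for
 * LAPD, then free the structure.
 */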
2022 static void
2023 release_l2(struct layer2 *l2)
2025 mISDN_FsmDelTimer(&l2->t200, 21);
2026 mISDN_FsmDelTimer(&l2->t203, 16);
2027 skb_queue_purge(&l2->i_queue);
2028 skb_queue_purge(&l2->ui_queue);
2029 skb_queue_purge(&l2->down_queue);
2030 ReleaseWin(l2);
2031 if (test_bit(FLG_LAPD, &l2->flag)) {
2032 TEIrelease(l2);
2033 if (l2->ch.st)
2034 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
2035 CLOSE_CHANNEL, NULL);
2037 kfree(l2);
2040 static int
2041 l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
2043 struct layer2 *l2 = container_of(ch, struct layer2, ch);
2044 u_int info;
2046 if (*debug & DEBUG_L2_CTRL)
2047 printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
2049 switch (cmd) {
2050 case OPEN_CHANNEL:
2051 if (test_bit(FLG_LAPD, &l2->flag)) {
2052 set_channel_address(&l2->ch, l2->sapi, l2->tei);
2053 info = DL_INFO_L2_CONNECT;
2054 l2up_create(l2, DL_INFORMATION_IND,
2055 sizeof(info), &info);
2057 break;
2058 case CLOSE_CHANNEL:
2059 if (l2->ch.peer)
2060 l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
2061 release_l2(l2);
2062 break;
2064 return 0;
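/*
 * Allocate and initialise a layer2 instance for LAPD (TE or NT mode)
 * or X.75/LAPB: set window size and the T200/N200/T203 defaults;
 * for LAPD the underlying D-channel is opened here as well.
 */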
2067 struct layer2 *
2068 create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
2069 int sapi)
2071 struct layer2 *l2;
2072 struct channel_req rq;
2074 l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
2075 if (!l2) {
2076 printk(KERN_ERR "kzalloc layer2 failed\n");
2077 return NULL;
2079 l2->next_id = 1;
2080 l2->down_id = MISDN_ID_NONE;
2081 l2->up = ch;
2082 l2->ch.st = ch->st;
2083 l2->ch.send = l2_send;
2084 l2->ch.ctrl = l2_ctrl;
2085 switch (protocol) {
2086 case ISDN_P_LAPD_NT:
2087 test_and_set_bit(FLG_LAPD, &l2->flag);
2088 test_and_set_bit(FLG_LAPD_NET, &l2->flag);
2089 test_and_set_bit(FLG_MOD128, &l2->flag);
2090 l2->sapi = sapi;
2091 l2->maxlen = MAX_DFRAME_LEN;
2092 if (test_bit(OPTION_L2_PMX, &options))
2093 l2->window = 7;
2094 else
2095 l2->window = 1;
2096 if (test_bit(OPTION_L2_PTP, &options))
2097 test_and_set_bit(FLG_PTP, &l2->flag);
2098 if (test_bit(OPTION_L2_FIXEDTEI, &options))
2099 test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
2100 l2->tei = tei;
2101 l2->T200 = 1000;
2102 l2->N200 = 3;
2103 l2->T203 = 10000;
2104 if (test_bit(OPTION_L2_PMX, &options))
2105 rq.protocol = ISDN_P_NT_E1;
2106 else
2107 rq.protocol = ISDN_P_NT_S0;
2108 rq.adr.channel = 0;
2109 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
2110 break;
2111 case ISDN_P_LAPD_TE:
2112 test_and_set_bit(FLG_LAPD, &l2->flag);
2113 test_and_set_bit(FLG_MOD128, &l2->flag);
2114 test_and_set_bit(FLG_ORIG, &l2->flag);
2115 l2->sapi = sapi;
2116 l2->maxlen = MAX_DFRAME_LEN;
2117 if (test_bit(OPTION_L2_PMX, &options))
2118 l2->window = 7;
2119 else
2120 l2->window = 1;
2121 if (test_bit(OPTION_L2_PTP, &options))
2122 test_and_set_bit(FLG_PTP, &l2->flag);
2123 if (test_bit(OPTION_L2_FIXEDTEI, &options))
2124 test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
2125 l2->tei = tei;
2126 l2->T200 = 1000;
2127 l2->N200 = 3;
2128 l2->T203 = 10000;
2129 if (test_bit(OPTION_L2_PMX, &options))
2130 rq.protocol = ISDN_P_TE_E1;
2131 else
2132 rq.protocol = ISDN_P_TE_S0;
2133 rq.adr.channel = 0;
2134 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
2135 break;
2136 case ISDN_P_B_X75SLP:
2137 test_and_set_bit(FLG_LAPB, &l2->flag);
2138 l2->window = 7;
2139 l2->maxlen = MAX_DATA_SIZE;
2140 l2->T200 = 1000;
2141 l2->N200 = 4;
2142 l2->T203 = 5000;
2143 l2->addr.A = 3;
2144 l2->addr.B = 1;
2145 break;
2146 default:
2147 printk(KERN_ERR "layer2 create failed prt %x\n",
2148 protocol);
2149 kfree(l2);
2150 return NULL;
2152 skb_queue_head_init(&l2->i_queue);
2153 skb_queue_head_init(&l2->ui_queue);
2154 skb_queue_head_init(&l2->down_queue);
2155 skb_queue_head_init(&l2->tmp_queue);
2156 InitWin(l2);
2157 l2->l2m.fsm = &l2fsm;
2158 if (test_bit(FLG_LAPB, &l2->flag) ||
2159 test_bit(FLG_PTP, &l2->flag) ||
2160 test_bit(FLG_LAPD_NET, &l2->flag))
2161 l2->l2m.state = ST_L2_4;
2162 else
2163 l2->l2m.state = ST_L2_1;
2164 l2->l2m.debug = *debug;
2165 l2->l2m.userdata = l2;
2166 l2->l2m.userint = 0;
2167 l2->l2m.printdebug = l2m_debug;
2169 mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
2170 mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
2171 return l2;
2174 static int
2175 x75create(struct channel_req *crq)
2177 struct layer2 *l2;
2179 if (crq->protocol != ISDN_P_B_X75SLP)
2180 return -EPROTONOSUPPORT;
2181 l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
2182 if (!l2)
2183 return -ENOMEM;
2184 crq->ch = &l2->ch;
2185 crq->protocol = ISDN_P_B_HDLC;
2186 return 0;
2189 static struct Bprotocol X75SLP = {
2190 .Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
2191 .name = "X75SLP",
	.create = x75create
};

int
Isdnl2_Init(u_int *deb)
2198 debug = deb;
2199 mISDN_register_Bprotocol(&X75SLP);
2200 l2fsm.state_count = L2_STATE_COUNT;
2201 l2fsm.event_count = L2_EVENT_COUNT;
2202 l2fsm.strEvent = strL2Event;
2203 l2fsm.strState = strL2State;
2204 mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
2205 TEIInit(deb);
2206 return 0;
2209 void
2210 Isdnl2_cleanup(void)
2212 mISDN_unregister_Bprotocol(&X75SLP);
2213 TEIFree();
2214 mISDN_FsmFree(&l2fsm);