/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include "fsm.h"
#include "layer2.h"
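
/*
 * Overview (editorial note): this file, drivers/isdn/mISDN/layer2.c,
 * implements the mISDN layer 2 state machine.  For the D-channel it
 * provides LAPD as described in ITU-T Q.921 (SABME establishment, TEI
 * management hooks, I/S/U frame handling, timers T200/T203); for
 * B-channels it provides the X.75 SLP single link procedure.  The
 * protocol behaviour is table driven: events are fed through the shared
 * "l2fsm" finite state machine, whose state/event/handler triples are
 * listed in L2FnList near the end of the file.
 */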

static u_int *debug;

static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};

static char *strL2State[] =
{
    "ST_L2_1",
    "ST_L2_2",
    "ST_L2_3",
    "ST_L2_4",
    "ST_L2_5",
    "ST_L2_6",
    "ST_L2_7",
    "ST_L2_8",
};

enum {
    EV_L2_UI,
    EV_L2_SABME,
    EV_L2_DISC,
    EV_L2_DM,
    EV_L2_UA,
    EV_L2_FRMR,
    EV_L2_SUPER,
    EV_L2_I,
    EV_L2_DL_DATA,
    EV_L2_ACK_PULL,
    EV_L2_DL_UNITDATA,
    EV_L2_DL_ESTABLISH_REQ,
    EV_L2_DL_RELEASE_REQ,
    EV_L2_MDL_ASSIGN,
    EV_L2_MDL_REMOVE,
    EV_L2_MDL_ERROR,
    EV_L1_DEACTIVATE,
    EV_L2_T200,
    EV_L2_T203,
    EV_L2_SET_OWN_BUSY,
    EV_L2_CLEAR_OWN_BUSY,
    EV_L2_FRAME_ERROR,
};

#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)

static char *strL2Event[] =
{
    "EV_L2_UI",
    "EV_L2_SABME",
    "EV_L2_DISC",
    "EV_L2_DM",
    "EV_L2_UA",
    "EV_L2_FRMR",
    "EV_L2_SUPER",
    "EV_L2_I",
    "EV_L2_DL_DATA",
    "EV_L2_ACK_PULL",
    "EV_L2_DL_UNITDATA",
    "EV_L2_DL_ESTABLISH_REQ",
    "EV_L2_DL_RELEASE_REQ",
    "EV_L2_MDL_ASSIGN",
    "EV_L2_MDL_REMOVE",
    "EV_L2_MDL_ERROR",
    "EV_L1_DEACTIVATE",
    "EV_L2_T200",
    "EV_L2_T203",
    "EV_L2_SET_OWN_BUSY",
    "EV_L2_CLEAR_OWN_BUSY",
    "EV_L2_FRAME_ERROR",
};

static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
    struct layer2 *l2 = fi->userdata;
    va_list va;

    if (!(*debug & DEBUG_L2_FSM))
        return;
    va_start(va, fmt);
    printk(KERN_DEBUG "l2 (tei %d): ", l2->tei);
    vprintk(fmt, va);
    printk("\n");
    va_end(va);
}
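
/*
 * The two helpers below compute protocol header sizes from the flags set
 * up in create_l2(): with FLG_LAPD the address field takes two octets
 * (SAPI + TEI), otherwise (LAPB/X.75) a single address octet is used;
 * with FLG_MOD128 the control field of I and S frames is two octets,
 * while UI and the other U frames always use a one-octet control field.
 */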

inline u_int
l2headersize(struct layer2 *l2, int ui)
{
    return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
        (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
}

inline u_int
l2addrsize(struct layer2 *l2)
{
    return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
}
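
/*
 * l2_newid() builds the id carried in every downgoing PH_DATA_REQ so
 * that the matching PH_DATA_CNF can be recognised in ph_data_confirm():
 * bits 16 and up hold a running sequence number (1..0x7fff, then it
 * wraps), bits 8-15 hold the TEI and bits 0-7 the SAPI.
 *
 *   Example: tei 64, sapi 0, next_id 5
 *            -> id = (5 << 16) | (64 << 8) | 0 = 0x00054000.
 */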

static u_int
l2_newid(struct layer2 *l2)
{
    u_int id;

    id = l2->next_id++;
    if (id == 0x7fff)
        l2->next_id = 1;
    id <<= 16;
    id |= l2->tei << 8;
    id |= l2->sapi;
    return id;
}

static void
l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
{
    int err;

    if (!l2->up)
        return;
    mISDN_HEAD_PRIM(skb) = prim;
    mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
    err = l2->up->send(l2->up, skb);
    if (err) {
        printk(KERN_WARNING "%s: err=%d\n", __func__, err);
        dev_kfree_skb(skb);
    }
}

static void
l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
{
    struct sk_buff *skb;
    struct mISDNhead *hh;
    int err;

    if (!l2->up)
        return;
    skb = mI_alloc_skb(len, GFP_ATOMIC);
    if (!skb)
        return;
    hh = mISDN_HEAD_P(skb);
    hh->prim = prim;
    hh->id = (l2->ch.nr << 16) | l2->ch.addr;
    if (len)
        memcpy(skb_put(skb, len), arg, len);
    err = l2->up->send(l2->up, skb);
    if (err) {
        printk(KERN_WARNING "%s: err=%d\n", __func__, err);
        dev_kfree_skb(skb);
    }
}

static int
l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
    int ret;

    ret = l2->ch.recv(l2->ch.peer, skb);
    if (ret && (*debug & DEBUG_L2_RECV))
        printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
    return ret;
}
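
/*
 * Downstream flow control: only one PH_DATA_REQ may be outstanding
 * towards layer 1 at a time.  l2down_raw() sets FLG_L1_NOTREADY and
 * remembers the id of the frame in flight; further data requests are
 * parked on down_queue.  The matching PH_DATA_CNF, handled in
 * ph_data_confirm(), pops the next queued frame (or clears the flag)
 * and kicks the FSM with EV_L2_ACK_PULL so more I-frames can be built.
 */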

static int
l2down_raw(struct layer2 *l2, struct sk_buff *skb)
{
    struct mISDNhead *hh = mISDN_HEAD_P(skb);

    if (hh->prim == PH_DATA_REQ) {
        if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
            skb_queue_tail(&l2->down_queue, skb);
            return 0;
        }
        l2->down_id = mISDN_HEAD_ID(skb);
    }
    return l2down_skb(l2, skb);
}

static int
l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
{
    struct mISDNhead *hh = mISDN_HEAD_P(skb);

    hh->prim = prim;
    hh->id = id;
    return l2down_raw(l2, skb);
}

static int
l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
{
    struct sk_buff *skb;
    int err;
    struct mISDNhead *hh;

    skb = mI_alloc_skb(len, GFP_ATOMIC);
    if (!skb)
        return -ENOMEM;
    hh = mISDN_HEAD_P(skb);
    hh->prim = prim;
    hh->id = id;
    if (len)
        memcpy(skb_put(skb, len), arg, len);
    err = l2down_raw(l2, skb);
    if (err)
        dev_kfree_skb(skb);
    return err;
}

static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
    struct sk_buff *nskb = skb;
    int ret = -EAGAIN;

    if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
        if (hh->id == l2->down_id) {
            nskb = skb_dequeue(&l2->down_queue);
            if (nskb) {
                l2->down_id = mISDN_HEAD_ID(nskb);
                if (l2down_skb(l2, nskb)) {
                    dev_kfree_skb(nskb);
                    l2->down_id = MISDN_ID_NONE;
                }
            } else
                l2->down_id = MISDN_ID_NONE;
            if (ret) {
                dev_kfree_skb(skb);
                ret = 0;
            }
            if (l2->down_id == MISDN_ID_NONE) {
                test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
                mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
            }
        }
    }
    if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
        nskb = skb_dequeue(&l2->down_queue);
        if (nskb) {
            l2->down_id = mISDN_HEAD_ID(nskb);
            if (l2down_skb(l2, nskb)) {
                dev_kfree_skb(nskb);
                l2->down_id = MISDN_ID_NONE;
                test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
            }
        } else
            test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
    }
    return ret;
}

static int
l2mgr(struct layer2 *l2, u_int prim, void *arg) {
    long c = (long)arg;

    printk(KERN_WARNING
        "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
    if (test_bit(FLG_LAPD, &l2->flag) &&
        !test_bit(FLG_FIXED_TEI, &l2->flag)) {
        switch (c) {
        case 'C':
        case 'D':
        case 'G':
        case 'H':
            l2_tei(l2, prim, (u_long)arg);
            break;
        }
    }
    return 0;
}

static void
set_peer_busy(struct layer2 *l2) {
    test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
    if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
        test_and_set_bit(FLG_L2BLOCK, &l2->flag);
}

static void
clear_peer_busy(struct layer2 *l2) {
    if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
        test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
}

static void
InitWin(struct layer2 *l2)
{
    int i;

    for (i = 0; i < MAX_WINDOW; i++)
        l2->windowar[i] = NULL;
}

static int
freewin(struct layer2 *l2)
{
    int i, cnt = 0;

    for (i = 0; i < MAX_WINDOW; i++) {
        if (l2->windowar[i]) {
            cnt++;
            dev_kfree_skb(l2->windowar[i]);
            l2->windowar[i] = NULL;
        }
    }
    return cnt;
}

static void
ReleaseWin(struct layer2 *l2)
{
    int cnt = freewin(l2);

    if (cnt)
        printk(KERN_WARNING
            "isdnl2 freed %d skbuffs in release\n", cnt);
}
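
/*
 * Send-window bookkeeping: v(s) is the next send sequence number, v(a)
 * the oldest not yet acknowledged one, so (vs - va) mod 8 (or mod 128
 * with FLG_MOD128) is the number of outstanding I-frames.  cansend()
 * allows a new I-frame only while that count is below the configured
 * window and the peer has not reported receiver-busy (FLG_PEER_BUSY).
 *
 *   Example (mod 8, window 1): va = 6, vs = 7 -> (7 - 6) % 8 = 1, so
 *   transmission stalls until the frame with N(S) = 6 is acknowledged.
 */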

inline unsigned int
cansend(struct layer2 *l2)
{
    unsigned int p1;

    if (test_bit(FLG_MOD128, &l2->flag))
        p1 = (l2->vs - l2->va) % 128;
    else
        p1 = (l2->vs - l2->va) % 8;
    return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
}

inline void
clear_exception(struct layer2 *l2)
{
    test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
    test_and_clear_bit(FLG_REJEXC, &l2->flag);
    test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
    clear_peer_busy(l2);
}
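
/*
 * sethdraddr() writes the address field.  For LAPD it emits the Q.921
 * two-octet form: SAPI in the upper six bits of the first octet together
 * with the C/R bit (inverted when running as network side, FLG_LAPD_NET),
 * then the TEI shifted left by one with the EA bit set.  For LAPB a
 * single address octet is picked from addr.A/addr.B depending on
 * command/response and on which side originated the link (FLG_ORIG).
 *
 *   Example: a command sent from the user side with sapi 0 and tei 64
 *   has C/R = 0, giving the octets 0x00 and (64 << 1) | 1 = 0x81.
 */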

static int
sethdraddr(struct layer2 *l2, u_char *header, int rsp)
{
    u_char *ptr = header;
    int crbit = rsp;

    if (test_bit(FLG_LAPD, &l2->flag)) {
        if (test_bit(FLG_LAPD_NET, &l2->flag))
            crbit = !crbit;
        *ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
        *ptr++ = (l2->tei << 1) | 1;
        return 2;
    } else {
        if (test_bit(FLG_ORIG, &l2->flag))
            crbit = !crbit;
        if (crbit)
            *ptr++ = l2->addr.B;
        else
            *ptr++ = l2->addr.A;
        return 1;
    }
}

static inline void
enqueue_super(struct layer2 *l2, struct sk_buff *skb)
{
    if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
        dev_kfree_skb(skb);
}

static inline void
enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
{
    if (l2->tm)
        l2_tei(l2, MDL_STATUS_UI_IND, 0);
    if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
        dev_kfree_skb(skb);
}

inline int
IsUI(u_char *data)
{
    return (data[0] & 0xef) == UI;
}

inline int
IsUA(u_char *data)
{
    return (data[0] & 0xef) == UA;
}

inline int
IsDM(u_char *data)
{
    return (data[0] & 0xef) == DM;
}

inline int
IsDISC(u_char *data)
{
    return (data[0] & 0xef) == DISC;
}

inline int
IsRR(u_char *data, struct layer2 *l2)
{
    if (test_bit(FLG_MOD128, &l2->flag))
        return data[0] == RR;
    else
        return (data[0] & 0xf) == 1;
}

inline int
IsSFrame(u_char *data, struct layer2 *l2)
{
    register u_char d = *data;

    if (!test_bit(FLG_MOD128, &l2->flag))
        d &= 0xf;
    return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
}

inline int
IsSABME(u_char *data, struct layer2 *l2)
{
    u_char d = data[0] & ~0x10;

    return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
}

inline int
IsREJ(u_char *data, struct layer2 *l2)
{
    return test_bit(FLG_MOD128, &l2->flag) ?
        data[0] == REJ : (data[0] & 0xf) == REJ;
}

inline int
IsFRMR(u_char *data)
{
    return (data[0] & 0xef) == FRMR;
}

inline int
IsRNR(u_char *data, struct layer2 *l2)
{
    return test_bit(FLG_MOD128, &l2->flag) ?
        data[0] == RNR : (data[0] & 0xf) == RNR;
}

static int
iframe_error(struct layer2 *l2, struct sk_buff *skb)
{
    u_int i;
    int rsp = *skb->data & 0x2;

    i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
    if (test_bit(FLG_ORIG, &l2->flag))
        rsp = !rsp;
    if (rsp)
        return 'L';
    if (skb->len < i)
        return 'N';
    if ((skb->len - i) > l2->maxlen)
        return 'O';
    return 0;
}

static int
super_error(struct layer2 *l2, struct sk_buff *skb)
{
    if (skb->len != l2addrsize(l2) +
        (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
        return 'N';
    return 0;
}

static int
unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
{
    int rsp = (*skb->data & 0x2) >> 1;
    if (test_bit(FLG_ORIG, &l2->flag))
        rsp = !rsp;
    if (rsp != wantrsp)
        return 'L';
    if (skb->len != l2addrsize(l2) + 1)
        return 'N';
    return 0;
}

static int
UI_error(struct layer2 *l2, struct sk_buff *skb)
{
    int rsp = *skb->data & 0x2;
    if (test_bit(FLG_ORIG, &l2->flag))
        rsp = !rsp;
    if (rsp)
        return 'L';
    if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
        return 'O';
    return 0;
}

static int
FRMR_error(struct layer2 *l2, struct sk_buff *skb)
{
    u_int headers = l2addrsize(l2) + 1;
    u_char *datap = skb->data + headers;
    int rsp = *skb->data & 0x2;

    if (test_bit(FLG_ORIG, &l2->flag))
        rsp = !rsp;
    if (!rsp)
        return 'L';
    if (test_bit(FLG_MOD128, &l2->flag)) {
        if (skb->len < headers + 5)
            return 'N';
        else if (*debug & DEBUG_L2)
            l2m_debug(&l2->l2m,
                "FRMR information %2x %2x %2x %2x %2x",
                datap[0], datap[1], datap[2], datap[3], datap[4]);
    } else {
        if (skb->len < headers + 3)
            return 'N';
        else if (*debug & DEBUG_L2)
            l2m_debug(&l2->l2m,
                "FRMR information %2x %2x %2x",
                datap[0], datap[1], datap[2]);
    }
    return 0;
}
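
/*
 * legalnr() implements the Q.921 rule that a received N(R) may only
 * acknowledge frames that are actually outstanding, i.e. it must lie
 * between v(a) and v(s) inclusive, computed modulo 8 (or 128 with
 * FLG_MOD128).  setva() then advances v(a) up to that N(R) and releases
 * the acknowledged skbs held in windowar[] via tmp_queue.
 *
 *   Example (mod 8): va = 6, vs = 0, so frames with N(S) 6 and 7 are
 *   outstanding; N(R) values 6, 7 and 0 are legal, anything else makes
 *   the caller run nrerrorrecovery().
 */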

static unsigned int
legalnr(struct layer2 *l2, unsigned int nr)
{
    if (test_bit(FLG_MOD128, &l2->flag))
        return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
    else
        return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
}

static void
setva(struct layer2 *l2, unsigned int nr)
{
    struct sk_buff *skb;

    while (l2->va != nr) {
        l2->va++;
        if (test_bit(FLG_MOD128, &l2->flag))
            l2->va %= 128;
        else
            l2->va %= 8;
        if (l2->windowar[l2->sow]) {
            skb_trim(l2->windowar[l2->sow], 0);
            skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
            l2->windowar[l2->sow] = NULL;
        }
        l2->sow = (l2->sow + 1) % l2->window;
    }
    skb = skb_dequeue(&l2->tmp_queue);
    while (skb) {
        dev_kfree_skb(skb);
        skb = skb_dequeue(&l2->tmp_queue);
    }
}

static void
send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
{
    u_char tmp[MAX_L2HEADER_LEN];
    int i;

    i = sethdraddr(l2, tmp, cr);
    tmp[i++] = cmd;
    if (skb)
        skb_trim(skb, 0);
    else {
        skb = mI_alloc_skb(i, GFP_ATOMIC);
        if (!skb) {
            printk(KERN_WARNING "%s: can't alloc skbuff\n",
                __func__);
            return;
        }
    }
    memcpy(skb_put(skb, i), tmp, i);
    enqueue_super(l2, skb);
}

inline u_char
get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
{
    return skb->data[l2addrsize(l2)] & 0x10;
}

inline u_char
get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
{
    u_char PF;

    PF = get_PollFlag(l2, skb);
    dev_kfree_skb(skb);
    return PF;
}

inline void
start_t200(struct layer2 *l2, int i)
{
    mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
    test_and_set_bit(FLG_T200_RUN, &l2->flag);
}

inline void
restart_t200(struct layer2 *l2, int i)
{
    mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
    test_and_set_bit(FLG_T200_RUN, &l2->flag);
}

inline void
stop_t200(struct layer2 *l2, int i)
{
    if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
        mISDN_FsmDelTimer(&l2->t200, i);
}

inline void
st5_dl_release_l2l3(struct layer2 *l2)
{
    int pr;

    if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
        pr = DL_RELEASE_CNF;
    else
        pr = DL_RELEASE_IND;
    l2up_create(l2, pr, 0, NULL);
}

inline void
lapb_dl_release_l2l3(struct layer2 *l2, int f)
{
    if (test_bit(FLG_LAPB, &l2->flag))
        l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
    l2up_create(l2, f, 0, NULL);
}
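
/*
 * establishlink() starts (re)establishment of multiple frame operation:
 * it clears the exception conditions, resets the retransmission counter,
 * sends SABM or SABME with the P bit set (the "| 0x10"), stops T203,
 * (re)starts T200 and moves the FSM to ST_L2_5, which corresponds to
 * the Q.921 "awaiting establishment" state.  l2_st5_tout_200() retries
 * the SABME up to N200 times before giving up.
 */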

static void
establishlink(struct FsmInst *fi)
{
    struct layer2 *l2 = fi->userdata;
    u_char cmd;

    clear_exception(l2);
    l2->rc = 0;
    cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
    send_uframe(l2, NULL, cmd, CMD);
    mISDN_FsmDelTimer(&l2->t203, 1);
    restart_t200(l2, 1);
    test_and_clear_bit(FLG_PEND_REL, &l2->flag);
    freewin(l2);
    mISDN_FsmChangeState(fi, ST_L2_5);
}

static void
l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
{
    struct sk_buff *skb = arg;
    struct layer2 *l2 = fi->userdata;

    if (get_PollFlagFree(l2, skb))
        l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
    else
        l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
}

static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
    struct sk_buff *skb = arg;
    struct layer2 *l2 = fi->userdata;

    if (get_PollFlagFree(l2, skb))
        l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
    else {
        l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
        establishlink(fi);
        test_and_clear_bit(FLG_L3_INIT, &l2->flag);
    }
}

static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
    struct sk_buff *skb = arg;
    struct layer2 *l2 = fi->userdata;

    if (get_PollFlagFree(l2, skb))
        l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
    else
        l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
    establishlink(fi);
    test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}

static void
l2_go_st3(struct FsmInst *fi, int event, void *arg)
{
    dev_kfree_skb((struct sk_buff *)arg);
    mISDN_FsmChangeState(fi, ST_L2_3);
}

static void
l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    mISDN_FsmChangeState(fi, ST_L2_3);
    dev_kfree_skb((struct sk_buff *)arg);
    l2_tei(l2, MDL_ASSIGN_IND, 0);
}

static void
l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_queue_tail(&l2->ui_queue, skb);
    mISDN_FsmChangeState(fi, ST_L2_2);
    l2_tei(l2, MDL_ASSIGN_IND, 0);
}

static void
l2_queue_ui(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_queue_tail(&l2->ui_queue, skb);
}

static void
tx_ui(struct layer2 *l2)
{
    struct sk_buff *skb;
    u_char header[MAX_L2HEADER_LEN];
    int i;

    i = sethdraddr(l2, header, CMD);
    if (test_bit(FLG_LAPD_NET, &l2->flag))
        header[1] = 0xff; /* tei 127 */
    header[i++] = UI;
    while ((skb = skb_dequeue(&l2->ui_queue))) {
        memcpy(skb_push(skb, i), header, i);
        enqueue_ui(l2, skb);
    }
}

static void
l2_send_ui(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_queue_tail(&l2->ui_queue, skb);
    tx_ui(l2);
}

static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_pull(skb, l2headersize(l2, 1));
/*
 *		in states 1-3 for broadcast
 */

    if (l2->tm)
        l2_tei(l2, MDL_STATUS_UI_IND, 0);
    l2up(l2, DL_UNITDATA_IND, skb);
}

static void
l2_establish(struct FsmInst *fi, int event, void *arg)
{
    struct sk_buff *skb = arg;
    struct layer2 *l2 = fi->userdata;

    establishlink(fi);
    test_and_set_bit(FLG_L3_INIT, &l2->flag);
    dev_kfree_skb(skb);
}

static void
l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
{
    struct sk_buff *skb = arg;
    struct layer2 *l2 = fi->userdata;

    skb_queue_purge(&l2->i_queue);
    test_and_set_bit(FLG_L3_INIT, &l2->flag);
    test_and_clear_bit(FLG_PEND_REL, &l2->flag);
    dev_kfree_skb(skb);
}

static void
l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
{
    struct sk_buff *skb = arg;
    struct layer2 *l2 = fi->userdata;

    skb_queue_purge(&l2->i_queue);
    establishlink(fi);
    test_and_set_bit(FLG_L3_INIT, &l2->flag);
    dev_kfree_skb(skb);
}

static void
l2_release(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_trim(skb, 0);
    l2up(l2, DL_RELEASE_CNF, skb);
}

static void
l2_pend_rel(struct FsmInst *fi, int event, void *arg)
{
    struct sk_buff *skb = arg;
    struct layer2 *l2 = fi->userdata;

    test_and_set_bit(FLG_PEND_REL, &l2->flag);
    dev_kfree_skb(skb);
}

static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_queue_purge(&l2->i_queue);
    freewin(l2);
    mISDN_FsmChangeState(fi, ST_L2_6);
    l2->rc = 0;
    send_uframe(l2, NULL, DISC | 0x10, CMD);
    mISDN_FsmDelTimer(&l2->t203, 1);
    restart_t200(l2, 2);
    if (skb)
        dev_kfree_skb(skb);
}

static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    l2->vs = 0;
    l2->va = 0;
    l2->vr = 0;
    l2->sow = 0;
    clear_exception(l2);
    send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
    mISDN_FsmChangeState(fi, ST_L2_7);
    mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
    skb_trim(skb, 0);
    l2up(l2, DL_ESTABLISH_IND, skb);
    if (l2->tm)
        l2_tei(l2, MDL_STATUS_UP_IND, 0);
}

static void
l2_send_UA(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
}

static void
l2_send_DM(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
}

static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;
    int est = 0;

    send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);

    l2mgr(l2, MDL_ERROR_IND, (void *) 'F');

    if (l2->vs != l2->va) {
        skb_queue_purge(&l2->i_queue);
        est = 1;
    }

    clear_exception(l2);
    l2->vs = 0;
    l2->va = 0;
    l2->vr = 0;
    l2->sow = 0;
    mISDN_FsmChangeState(fi, ST_L2_7);
    stop_t200(l2, 3);
    mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);

    if (est)
        l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *	    MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
 *	    0, NULL, 0);
 */
    if (skb_queue_len(&l2->i_queue) && cansend(l2))
        mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}

static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    mISDN_FsmChangeState(fi, ST_L2_4);
    mISDN_FsmDelTimer(&l2->t203, 3);
    stop_t200(l2, 4);

    send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
    skb_queue_purge(&l2->i_queue);
    freewin(l2);
    lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
    if (l2->tm)
        l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}

static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;
    int pr = -1;

    if (!get_PollFlag(l2, skb)) {
        l2_mdl_error_ua(fi, event, arg);
        return;
    }
    dev_kfree_skb(skb);
    if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
        l2_disconnect(fi, event, NULL);
    if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
        pr = DL_ESTABLISH_CNF;
    } else if (l2->vs != l2->va) {
        skb_queue_purge(&l2->i_queue);
        pr = DL_ESTABLISH_IND;
    }
    stop_t200(l2, 5);
    l2->vr = 0;
    l2->vs = 0;
    l2->va = 0;
    l2->sow = 0;
    mISDN_FsmChangeState(fi, ST_L2_7);
    mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
    if (pr != -1)
        l2up_create(l2, pr, 0, NULL);

    if (skb_queue_len(&l2->i_queue) && cansend(l2))
        mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);

    if (l2->tm)
        l2_tei(l2, MDL_STATUS_UP_IND, 0);
}

static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    if (!get_PollFlag(l2, skb)) {
        l2_mdl_error_ua(fi, event, arg);
        return;
    }
    dev_kfree_skb(skb);
    stop_t200(l2, 6);
    lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
    mISDN_FsmChangeState(fi, ST_L2_4);
    if (l2->tm)
        l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}

static void
l2_reestablish(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    if (!get_PollFlagFree(l2, skb)) {
        establishlink(fi);
        test_and_set_bit(FLG_L3_INIT, &l2->flag);
    }
}

static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    if (get_PollFlagFree(l2, skb)) {
        stop_t200(l2, 7);
        if (!test_bit(FLG_L3_INIT, &l2->flag))
            skb_queue_purge(&l2->i_queue);
        if (test_bit(FLG_LAPB, &l2->flag))
            l2down_create(l2, PH_DEACTIVATE_REQ,
                l2_newid(l2), 0, NULL);
        st5_dl_release_l2l3(l2);
        mISDN_FsmChangeState(fi, ST_L2_4);
        if (l2->tm)
            l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
    }
}

static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    if (get_PollFlagFree(l2, skb)) {
        stop_t200(l2, 8);
        lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
        mISDN_FsmChangeState(fi, ST_L2_4);
        if (l2->tm)
            l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
    }
}
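
/*
 * enquiry_cr() builds a single supervisory frame (RR, RNR or REJ) that
 * carries the current v(r): with modulo 128 the control field is two
 * octets (type, then N(R) << 1 with the P/F bit in bit 0), with modulo 8
 * it is one octet with N(R) in the top three bits and P/F in bit 4.
 * enquiry_response() answers a poll with RR or RNR (P/F = 1, response),
 * while transmit_enquiry() sends the same as a command with P = 1 and
 * starts T200 -- the Q.921 timer-recovery enquiry.
 */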

void
enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
{
    struct sk_buff *skb;
    u_char tmp[MAX_L2HEADER_LEN];
    int i;

    i = sethdraddr(l2, tmp, cr);
    if (test_bit(FLG_MOD128, &l2->flag)) {
        tmp[i++] = typ;
        tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
    } else
        tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
    skb = mI_alloc_skb(i, GFP_ATOMIC);
    if (!skb) {
        printk(KERN_WARNING
            "isdnl2 can't alloc skbuff for enquiry_cr\n");
        return;
    }
    memcpy(skb_put(skb, i), tmp, i);
    enqueue_super(l2, skb);
}

inline void
enquiry_response(struct layer2 *l2)
{
    if (test_bit(FLG_OWN_BUSY, &l2->flag))
        enquiry_cr(l2, RNR, RSP, 1);
    else
        enquiry_cr(l2, RR, RSP, 1);
    test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}

inline void
transmit_enquiry(struct layer2 *l2)
{
    if (test_bit(FLG_OWN_BUSY, &l2->flag))
        enquiry_cr(l2, RNR, CMD, 1);
    else
        enquiry_cr(l2, RR, CMD, 1);
    test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
    start_t200(l2, 9);
}

static void
nrerrorrecovery(struct FsmInst *fi)
{
    struct layer2 *l2 = fi->userdata;

    l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
    establishlink(fi);
    test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
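
/*
 * invoke_retransmission() rolls v(s) back to the acknowledged N(R): each
 * outstanding frame is taken out of its windowar[] slot and put back at
 * the head of i_queue, then EV_L2_ACK_PULL makes l2_pull_iqueue() send
 * it again with the current sequence numbers.  This is the REJ /
 * timer-recovery retransmission path of Q.921.
 */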

static void
invoke_retransmission(struct layer2 *l2, unsigned int nr)
{
    u_int p1;

    if (l2->vs != nr) {
        while (l2->vs != nr) {
            (l2->vs)--;
            if (test_bit(FLG_MOD128, &l2->flag)) {
                l2->vs %= 128;
                p1 = (l2->vs - l2->va) % 128;
            } else {
                l2->vs %= 8;
                p1 = (l2->vs - l2->va) % 8;
            }
            p1 = (p1 + l2->sow) % l2->window;
            if (l2->windowar[p1])
                skb_queue_head(&l2->i_queue, l2->windowar[p1]);
            else
                printk(KERN_WARNING
                    "%s: windowar[%d] is NULL\n",
                    __func__, p1);
            l2->windowar[p1] = NULL;
        }
        mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
    }
}

static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;
    int PollFlag, rsp, typ = RR;
    unsigned int nr;

    rsp = *skb->data & 0x2;
    if (test_bit(FLG_ORIG, &l2->flag))
        rsp = !rsp;

    skb_pull(skb, l2addrsize(l2));
    if (IsRNR(skb->data, l2)) {
        set_peer_busy(l2);
        typ = RNR;
    } else
        clear_peer_busy(l2);
    if (IsREJ(skb->data, l2))
        typ = REJ;

    if (test_bit(FLG_MOD128, &l2->flag)) {
        PollFlag = (skb->data[1] & 0x1) == 0x1;
        nr = skb->data[1] >> 1;
    } else {
        PollFlag = (skb->data[0] & 0x10);
        nr = (skb->data[0] >> 5) & 0x7;
    }
    dev_kfree_skb(skb);

    if (PollFlag) {
        if (rsp)
            l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
        else
            enquiry_response(l2);
    }
    if (legalnr(l2, nr)) {
        if (typ == REJ) {
            setva(l2, nr);
            invoke_retransmission(l2, nr);
            stop_t200(l2, 10);
            if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
                    EV_L2_T203, NULL, 6))
                l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
        } else if ((nr == l2->vs) && (typ == RR)) {
            setva(l2, nr);
            stop_t200(l2, 11);
            mISDN_FsmRestartTimer(&l2->t203, l2->T203,
                EV_L2_T203, NULL, 7);
        } else if ((l2->va != nr) || (typ == RNR)) {
            setva(l2, nr);
            if (typ != RR)
                mISDN_FsmDelTimer(&l2->t203, 9);
            restart_t200(l2, 12);
        }
        if (skb_queue_len(&l2->i_queue) && (typ == RR))
            mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
    } else
        nrerrorrecovery(fi);
}

static void
l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    if (!test_bit(FLG_L3_INIT, &l2->flag))
        skb_queue_tail(&l2->i_queue, skb);
    else
        dev_kfree_skb(skb);
}

static void
l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_queue_tail(&l2->i_queue, skb);
    mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}

static void
l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_queue_tail(&l2->i_queue, skb);
}

static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;
    int PollFlag, i;
    u_int ns, nr;

    i = l2addrsize(l2);
    if (test_bit(FLG_MOD128, &l2->flag)) {
        PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
        ns = skb->data[i] >> 1;
        nr = (skb->data[i + 1] >> 1) & 0x7f;
    } else {
        PollFlag = (skb->data[i] & 0x10);
        ns = (skb->data[i] >> 1) & 0x7;
        nr = (skb->data[i] >> 5) & 0x7;
    }
    if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
        dev_kfree_skb(skb);
        if (PollFlag)
            enquiry_response(l2);
    } else {
        if (l2->vr == ns) {
            l2->vr++;
            if (test_bit(FLG_MOD128, &l2->flag))
                l2->vr %= 128;
            else
                l2->vr %= 8;
            test_and_clear_bit(FLG_REJEXC, &l2->flag);
            if (PollFlag)
                enquiry_response(l2);
            else
                test_and_set_bit(FLG_ACK_PEND, &l2->flag);
            skb_pull(skb, l2headersize(l2, 0));
            l2up(l2, DL_DATA_IND, skb);
        } else {
            /* n(s)!=v(r) */
            dev_kfree_skb(skb);
            if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
                if (PollFlag)
                    enquiry_response(l2);
            } else {
                enquiry_cr(l2, REJ, RSP, PollFlag);
                test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
            }
        }
    }
    if (legalnr(l2, nr)) {
        if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
            (fi->state == ST_L2_7)) {
            if (nr == l2->vs) {
                stop_t200(l2, 13);
                mISDN_FsmRestartTimer(&l2->t203, l2->T203,
                    EV_L2_T203, NULL, 7);
            } else if (nr != l2->va)
                restart_t200(l2, 14);
        }
        setva(l2, nr);
    } else {
        nrerrorrecovery(fi);
        return;
    }
    if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
        mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
    if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
        enquiry_cr(l2, RR, RSP, 0);
}

static void
l2_got_tei(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    u_int info;

    l2->tei = (signed char)(long)arg;
    set_channel_address(&l2->ch, l2->sapi, l2->tei);
    info = DL_INFO_L2_CONNECT;
    l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
    if (fi->state == ST_L2_3) {
        establishlink(fi);
        test_and_set_bit(FLG_L3_INIT, &l2->flag);
    } else
        mISDN_FsmChangeState(fi, ST_L2_4);
    if (skb_queue_len(&l2->ui_queue))
        tx_ui(l2);
}

static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    if (test_bit(FLG_LAPD, &l2->flag) &&
        test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
        mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
    } else if (l2->rc == l2->N200) {
        mISDN_FsmChangeState(fi, ST_L2_4);
        test_and_clear_bit(FLG_T200_RUN, &l2->flag);
        skb_queue_purge(&l2->i_queue);
        l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
        if (test_bit(FLG_LAPB, &l2->flag))
            l2down_create(l2, PH_DEACTIVATE_REQ,
                l2_newid(l2), 0, NULL);
        st5_dl_release_l2l3(l2);
        if (l2->tm)
            l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
    } else {
        l2->rc++;
        mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
        send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
            SABME : SABM) | 0x10, CMD);
    }
}

static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    if (test_bit(FLG_LAPD, &l2->flag) &&
        test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
        mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
    } else if (l2->rc == l2->N200) {
        mISDN_FsmChangeState(fi, ST_L2_4);
        test_and_clear_bit(FLG_T200_RUN, &l2->flag);
        l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
        lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
        if (l2->tm)
            l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
    } else {
        l2->rc++;
        mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
            NULL, 9);
        send_uframe(l2, NULL, DISC | 0x10, CMD);
    }
}

static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    if (test_bit(FLG_LAPD, &l2->flag) &&
        test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
        mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
        return;
    }
    test_and_clear_bit(FLG_T200_RUN, &l2->flag);
    l2->rc = 0;
    mISDN_FsmChangeState(fi, ST_L2_8);
    transmit_enquiry(l2);
    l2->rc++;
}

static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    if (test_bit(FLG_LAPD, &l2->flag) &&
        test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
        mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
        return;
    }
    test_and_clear_bit(FLG_T200_RUN, &l2->flag);
    if (l2->rc == l2->N200) {
        l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
        establishlink(fi);
        test_and_clear_bit(FLG_L3_INIT, &l2->flag);
    } else {
        transmit_enquiry(l2);
        l2->rc++;
    }
}

static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    if (test_bit(FLG_LAPD, &l2->flag) &&
        test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
        mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
        return;
    }
    mISDN_FsmChangeState(fi, ST_L2_8);
    transmit_enquiry(l2);
    l2->rc = 0;
}
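
/*
 * l2_pull_iqueue() takes the next skb from i_queue, stores it in the
 * windowar[] slot that corresponds to its place in the send window (so
 * it can later be retransmitted or freed on acknowledgement), prepends
 * the address field and the I-frame control field carrying v(s)/v(r),
 * and hands a clone down as PH_DATA_REQ.  If the clone lacks headroom
 * for the header, a fresh skb is allocated and the data copied.  T203
 * is replaced by T200 while an I-frame is outstanding.
 */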

static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb, *nskb, *oskb;
    u_char header[MAX_L2HEADER_LEN];
    u_int i, p1;

    if (!cansend(l2))
        return;

    skb = skb_dequeue(&l2->i_queue);
    if (!skb)
        return;

    if (test_bit(FLG_MOD128, &l2->flag))
        p1 = (l2->vs - l2->va) % 128;
    else
        p1 = (l2->vs - l2->va) % 8;
    p1 = (p1 + l2->sow) % l2->window;
    if (l2->windowar[p1]) {
        printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
            p1);
        dev_kfree_skb(l2->windowar[p1]);
    }
    l2->windowar[p1] = skb;
    i = sethdraddr(l2, header, CMD);
    if (test_bit(FLG_MOD128, &l2->flag)) {
        header[i++] = l2->vs << 1;
        header[i++] = l2->vr << 1;
        l2->vs = (l2->vs + 1) % 128;
    } else {
        header[i++] = (l2->vr << 5) | (l2->vs << 1);
        l2->vs = (l2->vs + 1) % 8;
    }

    nskb = skb_clone(skb, GFP_ATOMIC);
    p1 = skb_headroom(nskb);
    if (p1 >= i)
        memcpy(skb_push(nskb, i), header, i);
    else {
        printk(KERN_WARNING
            "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
        oskb = nskb;
        nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
        if (!nskb) {
            dev_kfree_skb(oskb);
            printk(KERN_WARNING "%s: no skb mem\n", __func__);
            return;
        }
        memcpy(skb_put(nskb, i), header, i);
        memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
        dev_kfree_skb(oskb);
    }
    l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
    test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
    if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
        mISDN_FsmDelTimer(&l2->t203, 13);
        mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
    }
}

static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;
    int PollFlag, rsp, rnr = 0;
    unsigned int nr;

    rsp = *skb->data & 0x2;
    if (test_bit(FLG_ORIG, &l2->flag))
        rsp = !rsp;

    skb_pull(skb, l2addrsize(l2));

    if (IsRNR(skb->data, l2)) {
        set_peer_busy(l2);
        rnr = 1;
    } else
        clear_peer_busy(l2);

    if (test_bit(FLG_MOD128, &l2->flag)) {
        PollFlag = (skb->data[1] & 0x1) == 0x1;
        nr = skb->data[1] >> 1;
    } else {
        PollFlag = (skb->data[0] & 0x10);
        nr = (skb->data[0] >> 5) & 0x7;
    }
    dev_kfree_skb(skb);
    if (rsp && PollFlag) {
        if (legalnr(l2, nr)) {
            if (rnr) {
                restart_t200(l2, 15);
            } else {
                stop_t200(l2, 16);
                mISDN_FsmAddTimer(&l2->t203, l2->T203,
                    EV_L2_T203, NULL, 5);
                setva(l2, nr);
            }
            invoke_retransmission(l2, nr);
            mISDN_FsmChangeState(fi, ST_L2_7);
            if (skb_queue_len(&l2->i_queue) && cansend(l2))
                mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
        } else
            nrerrorrecovery(fi);
    } else {
        if (!rsp && PollFlag)
            enquiry_response(l2);
        if (legalnr(l2, nr))
            setva(l2, nr);
        else
            nrerrorrecovery(fi);
    }
}

static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_pull(skb, l2addrsize(l2) + 1);

    if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
        (IsUA(skb->data) && (fi->state == ST_L2_7))) {
        l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
        establishlink(fi);
        test_and_clear_bit(FLG_L3_INIT, &l2->flag);
    }
    dev_kfree_skb(skb);
}

static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    skb_queue_purge(&l2->ui_queue);
    l2->tei = GROUP_TEI;
    mISDN_FsmChangeState(fi, ST_L2_1);
}

static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    skb_queue_purge(&l2->ui_queue);
    l2->tei = GROUP_TEI;
    l2up_create(l2, DL_RELEASE_IND, 0, NULL);
    mISDN_FsmChangeState(fi, ST_L2_1);
}

static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    skb_queue_purge(&l2->i_queue);
    skb_queue_purge(&l2->ui_queue);
    freewin(l2);
    l2->tei = GROUP_TEI;
    stop_t200(l2, 17);
    st5_dl_release_l2l3(l2);
    mISDN_FsmChangeState(fi, ST_L2_1);
}

static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    skb_queue_purge(&l2->ui_queue);
    l2->tei = GROUP_TEI;
    stop_t200(l2, 18);
    l2up_create(l2, DL_RELEASE_IND, 0, NULL);
    mISDN_FsmChangeState(fi, ST_L2_1);
}

static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    skb_queue_purge(&l2->i_queue);
    skb_queue_purge(&l2->ui_queue);
    freewin(l2);
    l2->tei = GROUP_TEI;
    stop_t200(l2, 17);
    mISDN_FsmDelTimer(&l2->t203, 19);
    l2up_create(l2, DL_RELEASE_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *	    MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
 *	    0, NULL, 0);
 */
    mISDN_FsmChangeState(fi, ST_L2_1);
}

static void
l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_queue_purge(&l2->i_queue);
    skb_queue_purge(&l2->ui_queue);
    if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
        l2up(l2, DL_RELEASE_IND, skb);
    else
        dev_kfree_skb(skb);
}

static void
l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_queue_purge(&l2->i_queue);
    skb_queue_purge(&l2->ui_queue);
    freewin(l2);
    stop_t200(l2, 19);
    st5_dl_release_l2l3(l2);
    mISDN_FsmChangeState(fi, ST_L2_4);
    if (l2->tm)
        l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
    dev_kfree_skb(skb);
}

static void
l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_queue_purge(&l2->ui_queue);
    stop_t200(l2, 20);
    l2up(l2, DL_RELEASE_CNF, skb);
    mISDN_FsmChangeState(fi, ST_L2_4);
    if (l2->tm)
        l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}

static void
l2_persistant_da(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    skb_queue_purge(&l2->i_queue);
    skb_queue_purge(&l2->ui_queue);
    freewin(l2);
    stop_t200(l2, 19);
    mISDN_FsmDelTimer(&l2->t203, 19);
    l2up(l2, DL_RELEASE_IND, skb);
    mISDN_FsmChangeState(fi, ST_L2_4);
    if (l2->tm)
        l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}

static void
l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
        enquiry_cr(l2, RNR, RSP, 0);
        test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
    }
    if (skb)
        dev_kfree_skb(skb);
}

static void
l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;
    struct sk_buff *skb = arg;

    if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
        enquiry_cr(l2, RR, RSP, 0);
        test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
    }
    if (skb)
        dev_kfree_skb(skb);
}

static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    l2mgr(l2, MDL_ERROR_IND, arg);
}

static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
    struct layer2 *l2 = fi->userdata;

    l2mgr(l2, MDL_ERROR_IND, arg);
    establishlink(fi);
    test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
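
/*
 * L2FnList is the heart of the module: each entry binds one
 * (state, event) pair to a handler, and mISDN_FsmEvent() does the
 * dispatch.  The states ST_L2_1..ST_L2_8 follow the Q.921 numbering
 * (1 TEI unassigned, 4 TEI assigned, 5 awaiting establishment,
 * 6 awaiting release, 7 multiple frame established, 8 timer recovery);
 * events are either decoded frames (EV_L2_SABME, EV_L2_I, ...),
 * requests from layer 3 (EV_L2_DL_*), TEI-manager signals (EV_L2_MDL_*)
 * or timer expiries.  A pair that is not listed is simply not handled.
 */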

static struct FsmNode L2FnList[] =
{
    {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
    {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
    {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
    {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
    {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
    {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
    {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
    {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
    {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
    {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
    {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
    {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
    {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
    {ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
    {ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
    {ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
    {ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
    {ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
    {ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
    {ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
    {ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
    {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
    {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
    {ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
    {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
    {ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
    {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
    {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
    {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
    {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
    {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
    {ST_L2_4, EV_L2_SABME, l2_start_multi},
    {ST_L2_5, EV_L2_SABME, l2_send_UA},
    {ST_L2_6, EV_L2_SABME, l2_send_DM},
    {ST_L2_7, EV_L2_SABME, l2_restart_multi},
    {ST_L2_8, EV_L2_SABME, l2_restart_multi},
    {ST_L2_4, EV_L2_DISC, l2_send_DM},
    {ST_L2_5, EV_L2_DISC, l2_send_DM},
    {ST_L2_6, EV_L2_DISC, l2_send_UA},
    {ST_L2_7, EV_L2_DISC, l2_stop_multi},
    {ST_L2_8, EV_L2_DISC, l2_stop_multi},
    {ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
    {ST_L2_5, EV_L2_UA, l2_connected},
    {ST_L2_6, EV_L2_UA, l2_released},
    {ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
    {ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
    {ST_L2_4, EV_L2_DM, l2_reestablish},
    {ST_L2_5, EV_L2_DM, l2_st5_dm_release},
    {ST_L2_6, EV_L2_DM, l2_st6_dm_release},
    {ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
    {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
    {ST_L2_1, EV_L2_UI, l2_got_ui},
    {ST_L2_2, EV_L2_UI, l2_got_ui},
    {ST_L2_3, EV_L2_UI, l2_got_ui},
    {ST_L2_4, EV_L2_UI, l2_got_ui},
    {ST_L2_5, EV_L2_UI, l2_got_ui},
    {ST_L2_6, EV_L2_UI, l2_got_ui},
    {ST_L2_7, EV_L2_UI, l2_got_ui},
    {ST_L2_8, EV_L2_UI, l2_got_ui},
    {ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
    {ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
    {ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
    {ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
    {ST_L2_7, EV_L2_I, l2_got_iframe},
    {ST_L2_8, EV_L2_I, l2_got_iframe},
    {ST_L2_5, EV_L2_T200, l2_st5_tout_200},
    {ST_L2_6, EV_L2_T200, l2_st6_tout_200},
    {ST_L2_7, EV_L2_T200, l2_st7_tout_200},
    {ST_L2_8, EV_L2_T200, l2_st8_tout_200},
    {ST_L2_7, EV_L2_T203, l2_st7_tout_203},
    {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
    {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
    {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
    {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
    {ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
    {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
    {ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
    {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
    {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
    {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
    {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da},
    {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
    {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
    {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da},
    {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da},
    {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da},
    {ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da},
    {ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da},
};

#define L2_FN_COUNT (sizeof(L2FnList)/sizeof(struct FsmNode))

static int
ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
{
    u_char *datap = skb->data;
    int ret = -EINVAL;
    int psapi, ptei;
    u_int l;
    int c = 0;

    l = l2addrsize(l2);
    if (skb->len <= l) {
        mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
        return ret;
    }
    if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
        psapi = *datap++;
        ptei = *datap++;
        if ((psapi & 1) || !(ptei & 1)) {
            printk(KERN_WARNING
                "l2 D-channel frame wrong EA0/EA1\n");
            return ret;
        }
        psapi >>= 2;
        ptei >>= 1;
        if (psapi != l2->sapi) {
            /* not our business
             * printk(KERN_DEBUG "%s: sapi %d/%d sapi mismatch\n",
             *	__func__,
             *	psapi, l2->sapi);
             */
            dev_kfree_skb(skb);
            return 0;
        }
        if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
            /* not our business
             * printk(KERN_DEBUG "%s: tei %d/%d sapi %d mismatch\n",
             *	__func__,
             *	ptei, l2->tei, psapi);
             */
            dev_kfree_skb(skb);
            return 0;
        }
    } else
        datap += l;
    if (!(*datap & 1)) {	/* I-Frame */
        c = iframe_error(l2, skb);
        if (!c)
            ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
    } else if (IsSFrame(datap, l2)) {	/* S-Frame */
        c = super_error(l2, skb);
        if (!c)
            ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
    } else if (IsUI(datap)) {
        c = UI_error(l2, skb);
        if (!c)
            ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
    } else if (IsSABME(datap, l2)) {
        c = unnum_error(l2, skb, CMD);
        if (!c)
            ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
    } else if (IsUA(datap)) {
        c = unnum_error(l2, skb, RSP);
        if (!c)
            ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
    } else if (IsDISC(datap)) {
        c = unnum_error(l2, skb, CMD);
        if (!c)
            ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
    } else if (IsDM(datap)) {
        c = unnum_error(l2, skb, RSP);
        if (!c)
            ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
    } else if (IsFRMR(datap)) {
        c = FRMR_error(l2, skb);
        if (!c)
            ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
    } else
        c = 'L';
    if (c) {
        printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
        mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
    }
    return ret;
}

static int
l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
    struct layer2 *l2 = container_of(ch, struct layer2, ch);
    struct mISDNhead *hh = mISDN_HEAD_P(skb);
    int ret = -EINVAL;

    if (*debug & DEBUG_L2_RECV)
        printk(KERN_DEBUG "%s: prim(%x) id(%x) tei(%d)\n",
            __func__, hh->prim, hh->id, l2->tei);
    switch (hh->prim) {
    case PH_DATA_IND:
        ret = ph_data_indication(l2, hh, skb);
        break;
    case PH_DATA_CNF:
        ret = ph_data_confirm(l2, hh, skb);
        break;
    case PH_ACTIVATE_IND:
        test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
        l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
        if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
            ret = mISDN_FsmEvent(&l2->l2m,
                EV_L2_DL_ESTABLISH_REQ, skb);
        break;
    case PH_DEACTIVATE_IND:
        test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
        l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
        ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
        break;
    case MPH_INFORMATION_IND:
        if (!l2->up)
            break;
        ret = l2->up->send(l2->up, skb);
        break;
    case DL_DATA_REQ:
        ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
        break;
    case DL_UNITDATA_REQ:
        ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
        break;
    case DL_ESTABLISH_REQ:
        if (test_bit(FLG_LAPB, &l2->flag))
            test_and_set_bit(FLG_ORIG, &l2->flag);
        if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
            if (test_bit(FLG_LAPD, &l2->flag) ||
                test_bit(FLG_ORIG, &l2->flag))
                ret = mISDN_FsmEvent(&l2->l2m,
                    EV_L2_DL_ESTABLISH_REQ, skb);
        } else {
            if (test_bit(FLG_LAPD, &l2->flag) ||
                test_bit(FLG_ORIG, &l2->flag)) {
                test_and_set_bit(FLG_ESTAB_PEND,
                    &l2->flag);
            }
            ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
                skb);
        }
        break;
    case DL_RELEASE_REQ:
        if (test_bit(FLG_LAPB, &l2->flag))
            l2down_create(l2, PH_DEACTIVATE_REQ,
                l2_newid(l2), 0, NULL);
        ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
            skb);
        break;
    default:
        if (*debug & DEBUG_L2)
            l2m_debug(&l2->l2m, "l2 unknown pr %04x",
                hh->prim);
    }
    if (ret) {
        dev_kfree_skb(skb);
        ret = 0;
    }
    return ret;
}

int
tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
{
    int ret = -EINVAL;

    if (*debug & DEBUG_L2_TEI)
        printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
    switch (cmd) {
    case (MDL_ASSIGN_REQ):
        ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
        break;
    case (MDL_REMOVE_REQ):
        ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
        break;
    case (MDL_ERROR_IND):
        ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
        break;
    case (MDL_ERROR_RSP):
        /* ETS 300-125 5.3.2.1 Test: TC13010 */
        printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
        ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
        break;
    }
    return ret;
}

static void
release_l2(struct layer2 *l2)
{
    mISDN_FsmDelTimer(&l2->t200, 21);
    mISDN_FsmDelTimer(&l2->t203, 16);
    skb_queue_purge(&l2->i_queue);
    skb_queue_purge(&l2->ui_queue);
    skb_queue_purge(&l2->down_queue);
    ReleaseWin(l2);
    if (test_bit(FLG_LAPD, &l2->flag)) {
        TEIrelease(l2);
        if (l2->ch.st)
            l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
                CLOSE_CHANNEL, NULL);
    }
    kfree(l2);
}

static int
l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
    struct layer2 *l2 = container_of(ch, struct layer2, ch);
    u_int info;

    if (*debug & DEBUG_L2_CTRL)
        printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);

    switch (cmd) {
    case OPEN_CHANNEL:
        if (test_bit(FLG_LAPD, &l2->flag)) {
            set_channel_address(&l2->ch, l2->sapi, l2->tei);
            info = DL_INFO_L2_CONNECT;
            l2up_create(l2, DL_INFORMATION_IND,
                sizeof(info), &info);
        }
        break;
    case CLOSE_CHANNEL:
        if (l2->ch.peer)
            l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
        release_l2(l2);
        break;
    }
    return 0;
}
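
/*
 * create_l2() builds and initialises a layer2 instance, both for
 * D-channel LAPD links (ISDN_P_LAPD_TE / ISDN_P_LAPD_NT) and for the
 * B-channel X.75 SLP case used by x75create() further below.  It selects
 * modulo, window size, maximum frame length, address octets and the
 * retry/timer defaults (N200 = 3, T200 = 1000, T203 = 10000 for LAPD;
 * N200 = 4, T203 = 5000 for LAPB -- the values appear to be
 * milliseconds, i.e. the usual 1 s / 10 s Q.921 defaults), opens the
 * underlying D-channel for LAPD, and chooses the initial FSM state:
 * ST_L2_4 for LAPB, point-to-point and NT mode, ST_L2_1 (TEI
 * unassigned) otherwise.
 */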

struct layer2 *
create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, u_long arg)
{
    struct layer2 *l2;
    struct channel_req rq;

    l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
    if (!l2) {
        printk(KERN_ERR "kzalloc layer2 failed\n");
        return NULL;
    }
    l2->next_id = 1;
    l2->down_id = MISDN_ID_NONE;
    l2->up = ch;
    l2->ch.st = ch->st;
    l2->ch.send = l2_send;
    l2->ch.ctrl = l2_ctrl;
    switch (protocol) {
    case ISDN_P_LAPD_NT:
        test_and_set_bit(FLG_LAPD, &l2->flag);
        test_and_set_bit(FLG_LAPD_NET, &l2->flag);
        test_and_set_bit(FLG_MOD128, &l2->flag);
        l2->sapi = 0;
        l2->maxlen = MAX_DFRAME_LEN;
        if (test_bit(OPTION_L2_PMX, &options))
            l2->window = 7;
        else
            l2->window = 1;
        if (test_bit(OPTION_L2_PTP, &options))
            test_and_set_bit(FLG_PTP, &l2->flag);
        if (test_bit(OPTION_L2_FIXEDTEI, &options))
            test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
        l2->tei = (u_int)arg;
        l2->T200 = 1000;
        l2->N200 = 3;
        l2->T203 = 10000;
        if (test_bit(OPTION_L2_PMX, &options))
            rq.protocol = ISDN_P_NT_E1;
        else
            rq.protocol = ISDN_P_NT_S0;
        rq.adr.channel = 0;
        l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
        break;
    case ISDN_P_LAPD_TE:
        test_and_set_bit(FLG_LAPD, &l2->flag);
        test_and_set_bit(FLG_MOD128, &l2->flag);
        test_and_set_bit(FLG_ORIG, &l2->flag);
        l2->sapi = 0;
        l2->maxlen = MAX_DFRAME_LEN;
        if (test_bit(OPTION_L2_PMX, &options))
            l2->window = 7;
        else
            l2->window = 1;
        if (test_bit(OPTION_L2_PTP, &options))
            test_and_set_bit(FLG_PTP, &l2->flag);
        if (test_bit(OPTION_L2_FIXEDTEI, &options))
            test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
        l2->tei = (u_int)arg;
        l2->T200 = 1000;
        l2->N200 = 3;
        l2->T203 = 10000;
        if (test_bit(OPTION_L2_PMX, &options))
            rq.protocol = ISDN_P_TE_E1;
        else
            rq.protocol = ISDN_P_TE_S0;
        rq.adr.channel = 0;
        l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
        break;
    case ISDN_P_B_X75SLP:
        test_and_set_bit(FLG_LAPB, &l2->flag);
        l2->window = 7;
        l2->maxlen = MAX_DATA_SIZE;
        l2->T200 = 1000;
        l2->N200 = 4;
        l2->T203 = 5000;
        l2->addr.A = 3;
        l2->addr.B = 1;
        break;
    default:
        printk(KERN_ERR "layer2 create failed prt %x\n",
            protocol);
        kfree(l2);
        return NULL;
    }
    skb_queue_head_init(&l2->i_queue);
    skb_queue_head_init(&l2->ui_queue);
    skb_queue_head_init(&l2->down_queue);
    skb_queue_head_init(&l2->tmp_queue);
    InitWin(l2);
    l2->l2m.fsm = &l2fsm;
    if (test_bit(FLG_LAPB, &l2->flag) ||
        test_bit(FLG_PTP, &l2->flag) ||
        test_bit(FLG_LAPD_NET, &l2->flag))
        l2->l2m.state = ST_L2_4;
    else
        l2->l2m.state = ST_L2_1;
    l2->l2m.debug = *debug;
    l2->l2m.userdata = l2;
    l2->l2m.userint = 0;
    l2->l2m.printdebug = l2m_debug;

    mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
    mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
    return l2;
}

static int
x75create(struct channel_req *crq)
{
    struct layer2 *l2;

    if (crq->protocol != ISDN_P_B_X75SLP)
        return -EPROTONOSUPPORT;
    l2 = create_l2(crq->ch, crq->protocol, 0, 0);
    if (!l2)
        return -ENOMEM;
    crq->ch = &l2->ch;
    crq->protocol = ISDN_P_B_HDLC;
    return 0;
}

static struct Bprotocol X75SLP = {
    .Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
    .name = "X75SLP",
    .create = x75create
};

int
Isdnl2_Init(u_int *deb)
{
    debug = deb;
    mISDN_register_Bprotocol(&X75SLP);
    l2fsm.state_count = L2_STATE_COUNT;
    l2fsm.event_count = L2_EVENT_COUNT;
    l2fsm.strEvent = strL2Event;
    l2fsm.strState = strL2State;
    mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
    TEIInit(deb);
    return 0;
}

void
Isdnl2_cleanup(void)
{
    mISDN_unregister_Bprotocol(&X75SLP);
    TEIFree();
    mISDN_FsmFree(&l2fsm);
}