net: use NETDEV_TX_OK instead of 0 in ndo_start_xmit() functions
drivers/net/wan/hdlc_fr.c
/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *

            Theory of PVC state

 DCE mode:

 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
         0,x -> 1,1 if "link reliable" when sending FULL STATUS
         1,1 -> 1,0 if received FULL STATUS ACK

 (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
          -> 1 when "PVC up" and (exist,new) = 1,0

 DTE mode:
 (exist,new,active) = FULL STATUS if "link reliable"
                    = 0, 0, 0 if "link unreliable"

 No LMI:
 active = open and "link reliable"
 exist = new = not used

 CCITT LMI: ITU-T Q.933 Annex A
 ANSI LMI: ANSI T1.617 Annex D
 CISCO LMI: the original, aka "Gang of Four" LMI
*/

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC

#define FR_UI                   0x03
#define FR_PAD                  0x00

#define NLPID_IP                0xCC
#define NLPID_IPV6              0x8E
#define NLPID_SNAP              0x80
#define NLPID_PAD               0x00
#define NLPID_CCITT_ANSI_LMI    0x08
#define NLPID_CISCO_LMI         0x09


#define LMI_CCITT_ANSI_DLCI        0 /* LMI DLCI */
#define LMI_CISCO_DLCI          1023

#define LMI_CALLREF             0x00 /* Call Reference */
#define LMI_ANSI_LOCKSHIFT      0x95 /* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE  0x01 /* report type */
#define LMI_CCITT_REPTYPE       0x51
#define LMI_ANSI_CISCO_ALIVE    0x03 /* keep alive */
#define LMI_CCITT_ALIVE         0x53
#define LMI_ANSI_CISCO_PVCSTAT  0x07 /* PVC status */
#define LMI_CCITT_PVCSTAT       0x57

#define LMI_FULLREP             0x00 /* full report */
#define LMI_INTEGRITY           0x01 /* link integrity report */
#define LMI_SINGLE              0x02 /* single PVC report */

#define LMI_STATUS_ENQUIRY      0x75
#define LMI_STATUS              0x7D /* reply */

#define LMI_REPT_LEN               1 /* report type element length */
#define LMI_INTEG_LEN              2 /* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH    13 /* LMI frame lengths */
#define LMI_ANSI_LENGTH           14


typedef struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
        unsigned ea1:   1;
        unsigned cr:    1;
        unsigned dlcih: 6;

        unsigned ea2:   1;
        unsigned de:    1;
        unsigned becn:  1;
        unsigned fecn:  1;
        unsigned dlcil: 4;
#else
        unsigned dlcih: 6;
        unsigned cr:    1;
        unsigned ea1:   1;

        unsigned dlcil: 4;
        unsigned fecn:  1;
        unsigned becn:  1;
        unsigned de:    1;
        unsigned ea2:   1;
#endif
}__attribute__ ((packed)) fr_hdr;
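
/*
 * Illustrative sketch (derived from this struct and dlci_to_q922() below,
 * not part of the original file): the bitfields map the two-byte Q.922
 * address that starts every frame.  The 10-bit DLCI is split across both
 * octets -- the six high bits (dlcih) share the first octet with cr and ea1,
 * the four low bits (dlcil) share the second octet with fecn, becn, de and
 * ea2, where ea2 is the set extension bit that terminates the address.
 * DLCI 16, for example, appears on the wire as the bytes 0x04 0x01.
 */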

typedef struct pvc_device_struct {
        struct net_device *frad;
        struct net_device *main;
        struct net_device *ether;       /* bridged Ethernet interface */
        struct pvc_device_struct *next; /* Sorted in ascending DLCI order */
        int dlci;
        int open_count;

        struct {
                unsigned int new: 1;
                unsigned int active: 1;
                unsigned int exist: 1;
                unsigned int deleted: 1;
                unsigned int fecn: 1;
                unsigned int becn: 1;
                unsigned int bandwidth; /* Cisco LMI reporting only */
        }state;
}pvc_device;


struct frad_state {
        fr_proto settings;
        pvc_device *first_pvc;
        int dce_pvc_count;

        struct timer_list timer;
        unsigned long last_poll;
        int reliable;
        int dce_changed;
        int request;
        int fullrep_sent;
        u32 last_errors; /* last errors bit list */
        u8 n391cnt;
        u8 txseq; /* TX sequence number */
        u8 rxseq; /* RX sequence number */
};


static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);


static inline u16 q922_to_dlci(u8 *hdr)
{
        return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}


static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
        hdr[0] = (dlci >> 2) & 0xFC;
        hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}
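
/*
 * Worked example (for illustration only): dlci_to_q922(hdr, 16) produces
 * hdr[0] = 0x04 and hdr[1] = 0x01 (only the EA bit set in the low octet),
 * and q922_to_dlci() on those two bytes recovers 16.
 */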

static inline struct frad_state* state(hdlc_device *hdlc)
{
        return (struct frad_state *)(hdlc->state);
}


static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
{
        pvc_device *pvc = state(hdlc)->first_pvc;

        while (pvc) {
                if (pvc->dlci == dlci)
                        return pvc;
                if (pvc->dlci > dlci)
                        return NULL;    /* the list is sorted */
                pvc = pvc->next;
        }

        return NULL;
}


static pvc_device* add_pvc(struct net_device *dev, u16 dlci)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

        while (*pvc_p) {
                if ((*pvc_p)->dlci == dlci)
                        return *pvc_p;
                if ((*pvc_p)->dlci > dlci)
                        break;  /* the list is sorted */
                pvc_p = &(*pvc_p)->next;
        }

        pvc = kzalloc(sizeof(pvc_device), GFP_ATOMIC);
#ifdef DEBUG_PVC
        printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
        if (!pvc)
                return NULL;

        pvc->dlci = dlci;
        pvc->frad = dev;
        pvc->next = *pvc_p;     /* Put it in the chain */
        *pvc_p = pvc;
        return pvc;
}


static inline int pvc_is_used(pvc_device *pvc)
{
        return pvc->main || pvc->ether;
}


static inline void pvc_carrier(int on, pvc_device *pvc)
{
        if (on) {
                if (pvc->main)
                        if (!netif_carrier_ok(pvc->main))
                                netif_carrier_on(pvc->main);
                if (pvc->ether)
                        if (!netif_carrier_ok(pvc->ether))
                                netif_carrier_on(pvc->ether);
        } else {
                if (pvc->main)
                        if (netif_carrier_ok(pvc->main))
                                netif_carrier_off(pvc->main);
                if (pvc->ether)
                        if (netif_carrier_ok(pvc->ether))
                                netif_carrier_off(pvc->ether);
        }
}


static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
        pvc_device **pvc_p = &state(hdlc)->first_pvc;

        while (*pvc_p) {
                if (!pvc_is_used(*pvc_p)) {
                        pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
                        printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
                        *pvc_p = pvc->next;
                        kfree(pvc);
                        continue;
                }
                pvc_p = &(*pvc_p)->next;
        }
}


static inline struct net_device** get_dev_p(pvc_device *pvc, int type)
{
        if (type == ARPHRD_ETHER)
                return &pvc->ether;
        else
                return &pvc->main;
}


static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
{
        u16 head_len;
        struct sk_buff *skb = *skb_p;

        switch (skb->protocol) {
        case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
                head_len = 4;
                skb_push(skb, head_len);
                skb->data[3] = NLPID_CCITT_ANSI_LMI;
                break;

        case cpu_to_be16(NLPID_CISCO_LMI):
                head_len = 4;
                skb_push(skb, head_len);
                skb->data[3] = NLPID_CISCO_LMI;
                break;

        case cpu_to_be16(ETH_P_IP):
                head_len = 4;
                skb_push(skb, head_len);
                skb->data[3] = NLPID_IP;
                break;

        case cpu_to_be16(ETH_P_IPV6):
                head_len = 4;
                skb_push(skb, head_len);
                skb->data[3] = NLPID_IPV6;
                break;

        case cpu_to_be16(ETH_P_802_3):
                head_len = 10;
                if (skb_headroom(skb) < head_len) {
                        struct sk_buff *skb2 = skb_realloc_headroom(skb,
                                                                    head_len);
                        if (!skb2)
                                return -ENOBUFS;
                        dev_kfree_skb(skb);
                        skb = *skb_p = skb2;
                }
                skb_push(skb, head_len);
                skb->data[3] = FR_PAD;
                skb->data[4] = NLPID_SNAP;
                skb->data[5] = FR_PAD;
                skb->data[6] = 0x80;
                skb->data[7] = 0xC2;
                skb->data[8] = 0x00;
                skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
                break;

        default:
                head_len = 10;
                skb_push(skb, head_len);
                skb->data[3] = FR_PAD;
                skb->data[4] = NLPID_SNAP;
                skb->data[5] = FR_PAD;
                skb->data[6] = FR_PAD;
                skb->data[7] = FR_PAD;
                *(__be16*)(skb->data + 8) = skb->protocol;
        }

        dlci_to_q922(skb->data, dlci);
        skb->data[2] = FR_UI;
        return 0;
}
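
/*
 * Example of the result (illustrative, derived from the cases above): an
 * IPv4 packet sent on DLCI 16 gains the 4-byte header 04 01 03 CC -- the
 * Q.922 address, the UI control field (FR_UI) and NLPID_IP.  Bridged
 * Ethernet frames and protocols without their own NLPID use the 10-byte
 * SNAP-encapsulated form built in the ETH_P_802_3 and default cases.
 */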

static int pvc_open(struct net_device *dev)
{
        pvc_device *pvc = dev->ml_priv;

        if ((pvc->frad->flags & IFF_UP) == 0)
                return -EIO;  /* Frad must be UP in order to activate PVC */

        if (pvc->open_count++ == 0) {
                hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
                if (state(hdlc)->settings.lmi == LMI_NONE)
                        pvc->state.active = netif_carrier_ok(pvc->frad);

                pvc_carrier(pvc->state.active, pvc);
                state(hdlc)->dce_changed = 1;
        }
        return 0;
}


static int pvc_close(struct net_device *dev)
{
        pvc_device *pvc = dev->ml_priv;

        if (--pvc->open_count == 0) {
                hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
                if (state(hdlc)->settings.lmi == LMI_NONE)
                        pvc->state.active = 0;

                if (state(hdlc)->settings.dce) {
                        state(hdlc)->dce_changed = 1;
                        pvc->state.active = 0;
                }
        }
        return 0;
}


static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        pvc_device *pvc = dev->ml_priv;
        fr_proto_pvc_info info;

        if (ifr->ifr_settings.type == IF_GET_PROTO) {
                if (dev->type == ARPHRD_ETHER)
                        ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
                else
                        ifr->ifr_settings.type = IF_PROTO_FR_PVC;

                if (ifr->ifr_settings.size < sizeof(info)) {
                        /* data size wanted */
                        ifr->ifr_settings.size = sizeof(info);
                        return -ENOBUFS;
                }

                info.dlci = pvc->dlci;
                memcpy(info.master, pvc->frad->name, IFNAMSIZ);
                if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
                                 &info, sizeof(info)))
                        return -EFAULT;
                return 0;
        }

        return -EINVAL;
}


static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
        pvc_device *pvc = dev->ml_priv;

        if (pvc->state.active) {
                if (dev->type == ARPHRD_ETHER) {
                        int pad = ETH_ZLEN - skb->len;
                        if (pad > 0) { /* Pad the frame with zeros */
                                int len = skb->len;
                                if (skb_tailroom(skb) < pad)
                                        if (pskb_expand_head(skb, 0, pad,
                                                             GFP_ATOMIC)) {
                                                dev->stats.tx_dropped++;
                                                dev_kfree_skb(skb);
                                                return NETDEV_TX_OK;
                                        }
                                skb_put(skb, pad);
                                memset(skb->data + len, 0, pad);
                        }
                        skb->protocol = cpu_to_be16(ETH_P_802_3);
                }
                if (!fr_hard_header(&skb, pvc->dlci)) {
                        dev->stats.tx_bytes += skb->len;
                        dev->stats.tx_packets++;
                        if (pvc->state.fecn) /* TX Congestion counter */
                                dev->stats.tx_compressed++;
                        skb->dev = pvc->frad;
                        dev_queue_xmit(skb);
                        return NETDEV_TX_OK;
                }
        }

        dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static inline void fr_log_dlci_active(pvc_device *pvc)
{
        printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
               pvc->frad->name,
               pvc->dlci,
               pvc->main ? pvc->main->name : "",
               pvc->main && pvc->ether ? " " : "",
               pvc->ether ? pvc->ether->name : "",
               pvc->state.new ? " new" : "",
               !pvc->state.exist ? "deleted" :
               pvc->state.active ? "active" : "inactive");
}


static inline u8 fr_lmi_nextseq(u8 x)
{
        x++;
        return x ? x : 1;
}
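
/*
 * For clarity: the sequence number deliberately wraps from 255 to 1,
 * skipping 0 -- fr_lmi_recv() treats a reflected sequence of 0 as
 * "nothing confirmed yet" and asks for a full status report next poll.
 */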

static void fr_lmi_send(struct net_device *dev, int fullrep)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct sk_buff *skb;
        pvc_device *pvc = state(hdlc)->first_pvc;
        int lmi = state(hdlc)->settings.lmi;
        int dce = state(hdlc)->settings.dce;
        int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
        int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
        u8 *data;
        int i = 0;

        if (dce && fullrep) {
                len += state(hdlc)->dce_pvc_count * (2 + stat_len);
                if (len > HDLC_MAX_MRU) {
                        printk(KERN_WARNING "%s: Too many PVCs while sending "
                               "LMI full report\n", dev->name);
                        return;
                }
        }

        skb = dev_alloc_skb(len);
        if (!skb) {
                printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
                       dev->name);
                return;
        }
        memset(skb->data, 0, len);
        skb_reserve(skb, 4);
        if (lmi == LMI_CISCO) {
                skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
                fr_hard_header(&skb, LMI_CISCO_DLCI);
        } else {
                skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
                fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
        }
        data = skb_tail_pointer(skb);
        data[i++] = LMI_CALLREF;
        data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
        if (lmi == LMI_ANSI)
                data[i++] = LMI_ANSI_LOCKSHIFT;
        data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
                LMI_ANSI_CISCO_REPTYPE;
        data[i++] = LMI_REPT_LEN;
        data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
        data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
        data[i++] = LMI_INTEG_LEN;
        data[i++] = state(hdlc)->txseq =
                fr_lmi_nextseq(state(hdlc)->txseq);
        data[i++] = state(hdlc)->rxseq;

        if (dce && fullrep) {
                while (pvc) {
                        data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
                                LMI_ANSI_CISCO_PVCSTAT;
                        data[i++] = stat_len;

                        /* LMI start/restart */
                        if (state(hdlc)->reliable && !pvc->state.exist) {
                                pvc->state.exist = pvc->state.new = 1;
                                fr_log_dlci_active(pvc);
                        }

                        /* ifconfig PVC up */
                        if (pvc->open_count && !pvc->state.active &&
                            pvc->state.exist && !pvc->state.new) {
                                pvc_carrier(1, pvc);
                                pvc->state.active = 1;
                                fr_log_dlci_active(pvc);
                        }

                        if (lmi == LMI_CISCO) {
                                data[i] = pvc->dlci >> 8;
                                data[i + 1] = pvc->dlci & 0xFF;
                        } else {
                                data[i] = (pvc->dlci >> 4) & 0x3F;
                                data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
                                data[i + 2] = 0x80;
                        }

                        if (pvc->state.new)
                                data[i + 2] |= 0x08;
                        else if (pvc->state.active)
                                data[i + 2] |= 0x02;

                        i += stat_len;
                        pvc = pvc->next;
                }
        }

        skb_put(skb, i);
        skb->priority = TC_PRIO_CONTROL;
        skb->dev = dev;
        skb_reset_network_header(skb);

        dev_queue_xmit(skb);
}

static void fr_set_link_state(int reliable, struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        pvc_device *pvc = state(hdlc)->first_pvc;

        state(hdlc)->reliable = reliable;
        if (reliable) {
                netif_dormant_off(dev);
                state(hdlc)->n391cnt = 0; /* Request full status */
                state(hdlc)->dce_changed = 1;

                if (state(hdlc)->settings.lmi == LMI_NONE) {
                        while (pvc) {   /* Activate all PVCs */
                                pvc_carrier(1, pvc);
                                pvc->state.exist = pvc->state.active = 1;
                                pvc->state.new = 0;
                                pvc = pvc->next;
                        }
                }
        } else {
                netif_dormant_on(dev);
                while (pvc) {           /* Deactivate all PVCs */
                        pvc_carrier(0, pvc);
                        pvc->state.exist = pvc->state.active = 0;
                        pvc->state.new = 0;
                        if (!state(hdlc)->settings.dce)
                                pvc->state.bandwidth = 0;
                        pvc = pvc->next;
                }
        }
}

static void fr_timer(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;
        hdlc_device *hdlc = dev_to_hdlc(dev);
        int i, cnt = 0, reliable;
        u32 list;

        if (state(hdlc)->settings.dce) {
                reliable = state(hdlc)->request &&
                        time_before(jiffies, state(hdlc)->last_poll +
                                    state(hdlc)->settings.t392 * HZ);
                state(hdlc)->request = 0;
        } else {
                state(hdlc)->last_errors <<= 1; /* Shift the list */
                if (state(hdlc)->request) {
                        if (state(hdlc)->reliable)
                                printk(KERN_INFO "%s: No LMI status reply "
                                       "received\n", dev->name);
                        state(hdlc)->last_errors |= 1;
                }

                list = state(hdlc)->last_errors;
                for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
                        cnt += (list & 1);      /* errors count */

                reliable = (cnt < state(hdlc)->settings.n392);
        }

        if (state(hdlc)->reliable != reliable) {
                printk(KERN_INFO "%s: Link %sreliable\n", dev->name,
                       reliable ? "" : "un");
                fr_set_link_state(reliable, dev);
        }

        if (state(hdlc)->settings.dce)
                state(hdlc)->timer.expires = jiffies +
                        state(hdlc)->settings.t392 * HZ;
        else {
                if (state(hdlc)->n391cnt)
                        state(hdlc)->n391cnt--;

                fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

                state(hdlc)->last_poll = jiffies;
                state(hdlc)->request = 1;
                state(hdlc)->timer.expires = jiffies +
                        state(hdlc)->settings.t391 * HZ;
        }

        state(hdlc)->timer.function = fr_timer;
        state(hdlc)->timer.data = arg;
        add_timer(&state(hdlc)->timer);
}

static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        pvc_device *pvc;
        u8 rxseq, txseq;
        int lmi = state(hdlc)->settings.lmi;
        int dce = state(hdlc)->settings.dce;
        int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

        if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
                        LMI_CCITT_CISCO_LENGTH)) {
                printk(KERN_INFO "%s: Short LMI frame\n", dev->name);
                return 1;
        }

        if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
                             NLPID_CCITT_ANSI_LMI)) {
                printk(KERN_INFO "%s: Received non-LMI frame with LMI DLCI\n",
                       dev->name);
                return 1;
        }

        if (skb->data[4] != LMI_CALLREF) {
                printk(KERN_INFO "%s: Invalid LMI Call reference (0x%02X)\n",
                       dev->name, skb->data[4]);
                return 1;
        }

        if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
                printk(KERN_INFO "%s: Invalid LMI Message type (0x%02X)\n",
                       dev->name, skb->data[5]);
                return 1;
        }

        if (lmi == LMI_ANSI) {
                if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
                        printk(KERN_INFO "%s: Not ANSI locking shift in LMI"
                               " message (0x%02X)\n", dev->name, skb->data[6]);
                        return 1;
                }
                i = 7;
        } else
                i = 6;

        if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
                             LMI_ANSI_CISCO_REPTYPE)) {
                printk(KERN_INFO "%s: Not an LMI Report type IE (0x%02X)\n",
                       dev->name, skb->data[i]);
                return 1;
        }

        if (skb->data[++i] != LMI_REPT_LEN) {
                printk(KERN_INFO "%s: Invalid LMI Report type IE length"
                       " (%u)\n", dev->name, skb->data[i]);
                return 1;
        }

        reptype = skb->data[++i];
        if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
                printk(KERN_INFO "%s: Unsupported LMI Report type (0x%02X)\n",
                       dev->name, reptype);
                return 1;
        }

        if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
                               LMI_ANSI_CISCO_ALIVE)) {
                printk(KERN_INFO "%s: Not an LMI Link integrity verification"
                       " IE (0x%02X)\n", dev->name, skb->data[i]);
                return 1;
        }

        if (skb->data[++i] != LMI_INTEG_LEN) {
                printk(KERN_INFO "%s: Invalid LMI Link integrity verification"
                       " IE length (%u)\n", dev->name, skb->data[i]);
                return 1;
        }
        i++;

        state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
        rxseq = skb->data[i++]; /* Should confirm our sequence */

        txseq = state(hdlc)->txseq;

        if (dce)
                state(hdlc)->last_poll = jiffies;

        error = 0;
        if (!state(hdlc)->reliable)
                error = 1;

        if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
                state(hdlc)->n391cnt = 0;
                error = 1;
        }

        if (dce) {
                if (state(hdlc)->fullrep_sent && !error) {
                        /* Stop sending full report -
                           the last one has been confirmed by DTE */
                        state(hdlc)->fullrep_sent = 0;
                        pvc = state(hdlc)->first_pvc;
                        while (pvc) {
                                if (pvc->state.new) {
                                        pvc->state.new = 0;

                                        /* Tell DTE that new PVC is now active */
                                        state(hdlc)->dce_changed = 1;
                                }
                                pvc = pvc->next;
                        }
                }

                if (state(hdlc)->dce_changed) {
                        reptype = LMI_FULLREP;
                        state(hdlc)->fullrep_sent = 1;
                        state(hdlc)->dce_changed = 0;
                }

                state(hdlc)->request = 1; /* got request */
                fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
                return 0;
        }

        /* DTE */

        state(hdlc)->request = 0; /* got response, no request pending */

        if (error)
                return 0;

        if (reptype != LMI_FULLREP)
                return 0;

        pvc = state(hdlc)->first_pvc;

        while (pvc) {
                pvc->state.deleted = 1;
                pvc = pvc->next;
        }

        no_ram = 0;
        while (skb->len >= i + 2 + stat_len) {
                u16 dlci;
                u32 bw;
                unsigned int active, new;

                if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
                                     LMI_ANSI_CISCO_PVCSTAT)) {
                        printk(KERN_INFO "%s: Not an LMI PVC status IE"
                               " (0x%02X)\n", dev->name, skb->data[i]);
                        return 1;
                }

                if (skb->data[++i] != stat_len) {
                        printk(KERN_INFO "%s: Invalid LMI PVC status IE length"
                               " (%u)\n", dev->name, skb->data[i]);
                        return 1;
                }
                i++;

                new = !! (skb->data[i + 2] & 0x08);
                active = !! (skb->data[i + 2] & 0x02);
                if (lmi == LMI_CISCO) {
                        dlci = (skb->data[i] << 8) | skb->data[i + 1];
                        bw = (skb->data[i + 3] << 16) |
                                (skb->data[i + 4] << 8) |
                                (skb->data[i + 5]);
                } else {
                        dlci = ((skb->data[i] & 0x3F) << 4) |
                                ((skb->data[i + 1] & 0x78) >> 3);
                        bw = 0;
                }

                pvc = add_pvc(dev, dlci);

                if (!pvc && !no_ram) {
                        printk(KERN_WARNING
                               "%s: Memory squeeze on fr_lmi_recv()\n",
                               dev->name);
                        no_ram = 1;
                }

                if (pvc) {
                        pvc->state.exist = 1;
                        pvc->state.deleted = 0;
                        if (active != pvc->state.active ||
                            new != pvc->state.new ||
                            bw != pvc->state.bandwidth ||
                            !pvc->state.exist) {
                                pvc->state.new = new;
                                pvc->state.active = active;
                                pvc->state.bandwidth = bw;
                                pvc_carrier(active, pvc);
                                fr_log_dlci_active(pvc);
                        }
                }

                i += stat_len;
        }

        pvc = state(hdlc)->first_pvc;

        while (pvc) {
                if (pvc->state.deleted && pvc->state.exist) {
                        pvc_carrier(0, pvc);
                        pvc->state.active = pvc->state.new = 0;
                        pvc->state.exist = 0;
                        pvc->state.bandwidth = 0;
                        fr_log_dlci_active(pvc);
                }
                pvc = pvc->next;
        }

        /* Next full report after N391 polls */
        state(hdlc)->n391cnt = state(hdlc)->settings.n391;

        return 0;
}

static int fr_rx(struct sk_buff *skb)
{
        struct net_device *frad = skb->dev;
        hdlc_device *hdlc = dev_to_hdlc(frad);
        fr_hdr *fh = (fr_hdr*)skb->data;
        u8 *data = skb->data;
        u16 dlci;
        pvc_device *pvc;
        struct net_device *dev = NULL;

        if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
                goto rx_error;

        dlci = q922_to_dlci(skb->data);

        if ((dlci == LMI_CCITT_ANSI_DLCI &&
             (state(hdlc)->settings.lmi == LMI_ANSI ||
              state(hdlc)->settings.lmi == LMI_CCITT)) ||
            (dlci == LMI_CISCO_DLCI &&
             state(hdlc)->settings.lmi == LMI_CISCO)) {
                if (fr_lmi_recv(frad, skb))
                        goto rx_error;
                dev_kfree_skb_any(skb);
                return NET_RX_SUCCESS;
        }

        pvc = find_pvc(hdlc, dlci);
        if (!pvc) {
#ifdef DEBUG_PKT
                printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n",
                       frad->name, dlci);
#endif
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
                printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
                       dlci, fh->fecn ? "N" : "FF");
#endif
                pvc->state.fecn ^= 1;
        }

        if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
                printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
                       dlci, fh->becn ? "N" : "FF");
#endif
                pvc->state.becn ^= 1;
        }


        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
                frad->stats.rx_dropped++;
                return NET_RX_DROP;
        }

        if (data[3] == NLPID_IP) {
                skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
                dev = pvc->main;
                skb->protocol = htons(ETH_P_IP);

        } else if (data[3] == NLPID_IPV6) {
                skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
                dev = pvc->main;
                skb->protocol = htons(ETH_P_IPV6);

        } else if (skb->len > 10 && data[3] == FR_PAD &&
                   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
                u16 oui = ntohs(*(__be16*)(data + 6));
                u16 pid = ntohs(*(__be16*)(data + 8));
                skb_pull(skb, 10);

                switch ((((u32)oui) << 16) | pid) {
                case ETH_P_ARP: /* routed frame with SNAP */
                case ETH_P_IPX:
                case ETH_P_IP:  /* a long variant */
                case ETH_P_IPV6:
                        dev = pvc->main;
                        skb->protocol = htons(pid);
                        break;

                case 0x80C20007: /* bridged Ethernet frame */
                        if ((dev = pvc->ether) != NULL)
                                skb->protocol = eth_type_trans(skb, dev);
                        break;

                default:
                        printk(KERN_INFO "%s: Unsupported protocol, OUI=%x "
                               "PID=%x\n", frad->name, oui, pid);
                        dev_kfree_skb_any(skb);
                        return NET_RX_DROP;
                }
        } else {
                printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x "
                       "length = %i\n", frad->name, data[3], skb->len);
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        if (dev) {
                dev->stats.rx_packets++; /* PVC traffic */
                dev->stats.rx_bytes += skb->len;
                if (pvc->state.becn)
                        dev->stats.rx_compressed++;
                skb->dev = dev;
                netif_rx(skb);
                return NET_RX_SUCCESS;
        } else {
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

 rx_error:
        frad->stats.rx_errors++; /* Mark error */
        dev_kfree_skb_any(skb);
        return NET_RX_DROP;
}

static void fr_start(struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
        printk(KERN_DEBUG "fr_start\n");
#endif
        if (state(hdlc)->settings.lmi != LMI_NONE) {
                state(hdlc)->reliable = 0;
                state(hdlc)->dce_changed = 1;
                state(hdlc)->request = 0;
                state(hdlc)->fullrep_sent = 0;
                state(hdlc)->last_errors = 0xFFFFFFFF;
                state(hdlc)->n391cnt = 0;
                state(hdlc)->txseq = state(hdlc)->rxseq = 0;

                init_timer(&state(hdlc)->timer);
                /* First poll after 1 s */
                state(hdlc)->timer.expires = jiffies + HZ;
                state(hdlc)->timer.function = fr_timer;
                state(hdlc)->timer.data = (unsigned long)dev;
                add_timer(&state(hdlc)->timer);
        } else
                fr_set_link_state(1, dev);
}


static void fr_stop(struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
        printk(KERN_DEBUG "fr_stop\n");
#endif
        if (state(hdlc)->settings.lmi != LMI_NONE)
                del_timer_sync(&state(hdlc)->timer);
        fr_set_link_state(0, dev);
}


static void fr_close(struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        pvc_device *pvc = state(hdlc)->first_pvc;

        while (pvc) {           /* Shutdown all PVCs for this FRAD */
                if (pvc->main)
                        dev_close(pvc->main);
                if (pvc->ether)
                        dev_close(pvc->ether);
                pvc = pvc->next;
        }
}

static void pvc_setup(struct net_device *dev)
{
        dev->type = ARPHRD_DLCI;
        dev->flags = IFF_POINTOPOINT;
        dev->hard_header_len = 10;
        dev->addr_len = 2;
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}


static const struct net_device_ops pvc_ops = {
        .ndo_open       = pvc_open,
        .ndo_stop       = pvc_close,
        .ndo_change_mtu = hdlc_change_mtu,
        .ndo_start_xmit = pvc_xmit,
        .ndo_do_ioctl   = pvc_ioctl,
};


static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
        hdlc_device *hdlc = dev_to_hdlc(frad);
        pvc_device *pvc;
        struct net_device *dev;
        int result, used;

        if ((pvc = add_pvc(frad, dlci)) == NULL) {
                printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
                       frad->name);
                return -ENOBUFS;
        }

        if (*get_dev_p(pvc, type))
                return -EEXIST;

        used = pvc_is_used(pvc);

        if (type == ARPHRD_ETHER)
                dev = alloc_netdev(0, "pvceth%d", ether_setup);
        else
                dev = alloc_netdev(0, "pvc%d", pvc_setup);

        if (!dev) {
                printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
                       frad->name);
                delete_unused_pvcs(hdlc);
                return -ENOBUFS;
        }

        if (type == ARPHRD_ETHER)
                random_ether_addr(dev->dev_addr);
        else {
                *(__be16*)dev->dev_addr = htons(dlci);
                dlci_to_q922(dev->broadcast, dlci);
        }
        dev->netdev_ops = &pvc_ops;
        dev->mtu = HDLC_MAX_MTU;
        dev->tx_queue_len = 0;
        dev->ml_priv = pvc;

        result = dev_alloc_name(dev, dev->name);
        if (result < 0) {
                free_netdev(dev);
                delete_unused_pvcs(hdlc);
                return result;
        }

        if (register_netdevice(dev) != 0) {
                free_netdev(dev);
                delete_unused_pvcs(hdlc);
                return -EIO;
        }

        dev->destructor = free_netdev;
        *get_dev_p(pvc, type) = dev;
        if (!used) {
                state(hdlc)->dce_changed = 1;
                state(hdlc)->dce_pvc_count++;
        }
        return 0;
}


static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
        pvc_device *pvc;
        struct net_device *dev;

        if ((pvc = find_pvc(hdlc, dlci)) == NULL)
                return -ENOENT;

        if ((dev = *get_dev_p(pvc, type)) == NULL)
                return -ENOENT;

        if (dev->flags & IFF_UP)
                return -EBUSY;          /* PVC in use */

        unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
        *get_dev_p(pvc, type) = NULL;

        if (!pvc_is_used(pvc)) {
                state(hdlc)->dce_pvc_count--;
                state(hdlc)->dce_changed = 1;
        }
        delete_unused_pvcs(hdlc);
        return 0;
}


static void fr_destroy(struct net_device *frad)
{
        hdlc_device *hdlc = dev_to_hdlc(frad);
        pvc_device *pvc = state(hdlc)->first_pvc;
        state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
        state(hdlc)->dce_pvc_count = 0;
        state(hdlc)->dce_changed = 1;

        while (pvc) {
                pvc_device *next = pvc->next;
                /* destructors will free_netdev() main and ether */
                if (pvc->main)
                        unregister_netdevice(pvc->main);

                if (pvc->ether)
                        unregister_netdevice(pvc->ether);

                kfree(pvc);
                pvc = next;
        }
}


static struct hdlc_proto proto = {
        .close          = fr_close,
        .start          = fr_start,
        .stop           = fr_stop,
        .detach         = fr_destroy,
        .ioctl          = fr_ioctl,
        .netif_rx       = fr_rx,
        .module         = THIS_MODULE,
};

static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
        const size_t size = sizeof(fr_proto);
        fr_proto new_settings;
        hdlc_device *hdlc = dev_to_hdlc(dev);
        fr_proto_pvc pvc;
        int result;

        switch (ifr->ifr_settings.type) {
        case IF_GET_PROTO:
                if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
                        return -EINVAL;
                ifr->ifr_settings.type = IF_PROTO_FR;
                if (ifr->ifr_settings.size < size) {
                        ifr->ifr_settings.size = size; /* data size wanted */
                        return -ENOBUFS;
                }
                if (copy_to_user(fr_s, &state(hdlc)->settings, size))
                        return -EFAULT;
                return 0;

        case IF_PROTO_FR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (dev->flags & IFF_UP)
                        return -EBUSY;

                if (copy_from_user(&new_settings, fr_s, size))
                        return -EFAULT;

                if (new_settings.lmi == LMI_DEFAULT)
                        new_settings.lmi = LMI_ANSI;

                if ((new_settings.lmi != LMI_NONE &&
                     new_settings.lmi != LMI_ANSI &&
                     new_settings.lmi != LMI_CCITT &&
                     new_settings.lmi != LMI_CISCO) ||
                    new_settings.t391 < 1 ||
                    new_settings.t392 < 2 ||
                    new_settings.n391 < 1 ||
                    new_settings.n392 < 1 ||
                    new_settings.n393 < new_settings.n392 ||
                    new_settings.n393 > 32 ||
                    (new_settings.dce != 0 &&
                     new_settings.dce != 1))
                        return -EINVAL;

                result = hdlc->attach(dev, ENCODING_NRZ,
                                      PARITY_CRC16_PR1_CCITT);
                if (result)
                        return result;

                if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
                        result = attach_hdlc_protocol(dev, &proto,
                                                      sizeof(struct frad_state));
                        if (result)
                                return result;
                        state(hdlc)->first_pvc = NULL;
                        state(hdlc)->dce_pvc_count = 0;
                }
                memcpy(&state(hdlc)->settings, &new_settings, size);
                dev->type = ARPHRD_FRAD;
                return 0;

        case IF_PROTO_FR_ADD_PVC:
        case IF_PROTO_FR_DEL_PVC:
        case IF_PROTO_FR_ADD_ETH_PVC:
        case IF_PROTO_FR_DEL_ETH_PVC:
                if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
                        return -EINVAL;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
                                   sizeof(fr_proto_pvc)))
                        return -EFAULT;

                if (pvc.dlci <= 0 || pvc.dlci >= 1024)
                        return -EINVAL; /* Only 10 bits, DLCI 0 reserved */

                if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
                    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
                        result = ARPHRD_ETHER; /* bridged Ethernet device */
                else
                        result = ARPHRD_DLCI;

                if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
                    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
                        return fr_add_pvc(dev, pvc.dlci, result);
                else
                        return fr_del_pvc(hdlc, pvc.dlci, result);
        }

        return -EINVAL;
}

static int __init mod_init(void)
{
        register_hdlc_protocol(&proto);
        return 0;
}


static void __exit mod_exit(void)
{
        unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");