drivers/isdn/mISDN/hwchannel.c
/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mISDNhw.h>

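/*
 * D-channel bottom half: runs from the workqueue, delivers queued receive
 * frames to the attached peer (dropping them if no peer is attached or
 * delivery fails) and calls the PH state change callback if flagged.
 */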
static void
dchannel_bh(struct work_struct *ws)
{
	struct dchannel *dch = container_of(ws, struct dchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
		while ((skb = skb_dequeue(&dch->rqueue))) {
			if (likely(dch->dev.D.peer)) {
				err = dch->dev.D.recv(dch->dev.D.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
	if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
		if (dch->phfunc)
			dch->phfunc(dch);
	}
}

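/*
 * B-channel bottom half: drains the receive queue towards the peer,
 * decrementing rcount for each dequeued frame; frames are dropped if
 * no peer is attached or delivery fails.
 */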
static void
bchannel_bh(struct work_struct *ws)
{
	struct bchannel *bch = container_of(ws, struct bchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
		while ((skb = skb_dequeue(&bch->rqueue))) {
			bch->rcount--;
			if (likely(bch->ch.peer)) {
				err = bch->ch.recv(bch->ch.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
}

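/*
 * Initialise a D-channel: set HDLC mode, reset buffers and indexes, set
 * the PH state change callback and prepare the queues and the workqueue.
 */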
int
mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
{
	test_and_set_bit(FLG_HDLC, &ch->Flags);
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	ch->phfunc = phf;
	skb_queue_head_init(&ch->squeue);
	skb_queue_head_init(&ch->rqueue);
	INIT_LIST_HEAD(&ch->dev.bchannels);
	INIT_WORK(&ch->workq, dchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initdchannel);

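/*
 * Initialise a B-channel: clear the flags, reset buffers and indexes and
 * prepare the receive queue and the workqueue.
 */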
int
mISDN_initbchannel(struct bchannel *ch, int maxlen)
{
	ch->Flags = 0;
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	skb_queue_head_init(&ch->rqueue);
	ch->rcount = 0;
	ch->next_skb = NULL;
	INIT_WORK(&ch->workq, bchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initbchannel);

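/*
 * Release a D-channel: free pending tx/rx buffers, purge both queues and
 * flush scheduled work.
 */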
int
mISDN_freedchannel(struct dchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	skb_queue_purge(&ch->squeue);
	skb_queue_purge(&ch->rqueue);
	flush_scheduled_work();
	return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);

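/*
 * Reset a B-channel to idle: free any pending tx/rx/next buffers and
 * clear the TX_BUSY, TX_NEXT and ACTIVE flags.
 */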
void
mISDN_clear_bchannel(struct bchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	ch->tx_idx = 0;
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	if (ch->next_skb) {
		dev_kfree_skb(ch->next_skb);
		ch->next_skb = NULL;
	}
	test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
	test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
	test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
}
EXPORT_SYMBOL(mISDN_clear_bchannel);

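/*
 * Release a B-channel: clear it, purge the receive queue and flush
 * scheduled work.
 */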
int
mISDN_freebchannel(struct bchannel *ch)
{
	mISDN_clear_bchannel(ch);
	skb_queue_purge(&ch->rqueue);
	ch->rcount = 0;
	flush_scheduled_work();
	return 0;
}
EXPORT_SYMBOL(mISDN_freebchannel);

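/* extract SAPI and TEI from the first two LAPD address octets */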
static inline u_int
get_sapi_tei(u_char *p)
{
	u_int sapi, tei;

	sapi = *p >> 2;
	tei = p[1] >> 1;
	return sapi | (tei << 8);
}

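/*
 * Queue a received D-channel frame for delivery: frames shorter than the
 * two byte LAPD address are dropped, otherwise prim/id are filled in and
 * the bottom half is scheduled.
 */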
void
recv_Dchannel(struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(dch->rx_skb);
		dch->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(dch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = get_sapi_tei(dch->rx_skb->data);
	skb_queue_tail(&dch->rqueue, dch->rx_skb);
	dch->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel);

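/*
 * Queue a frame received on the E-channel (D-channel echo) on the
 * D-channel receive queue as PH_DATA_E_IND.
 */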
void
recv_Echannel(struct dchannel *ech, struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(ech->rx_skb);
		ech->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(ech->rx_skb);
	hh->prim = PH_DATA_E_IND;
	hh->id = get_sapi_tei(ech->rx_skb->data);
	skb_queue_tail(&dch->rqueue, ech->rx_skb);
	ech->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Echannel);

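/*
 * Queue a received B-channel frame for delivery; if more than 64 frames
 * are already pending the queue is flushed and the frame is not queued.
 */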
void
recv_Bchannel(struct bchannel *bch, unsigned int id)
{
	struct mISDNhead *hh;

	hh = mISDN_HEAD_P(bch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = id;
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, bch->rx_skb);
	bch->rx_skb = NULL;
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel);

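/* queue an already prepared skb on the D-channel receive queue */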
void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);

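/*
 * Queue an already prepared skb on the B-channel receive queue, flushing
 * the queue first if it has grown beyond 64 frames.
 */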
void
recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel_skb);

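/* queue a PH_DATA_CNF for the just transmitted D-channel frame */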
static void
confirm_Dsend(struct dchannel *dch)
{
	struct sk_buff *skb;

	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
	    0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		    mISDN_HEAD_ID(dch->tx_skb));
		return;
	}
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}

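/*
 * Fetch the next D-channel frame to transmit from the send queue.
 * Returns 1 with tx_skb set (and a confirm queued), or 0 and clears
 * TX_BUSY when the queue is empty.
 */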
int
get_next_dframe(struct dchannel *dch)
{
	dch->tx_idx = 0;
	dch->tx_skb = skb_dequeue(&dch->squeue);
	if (dch->tx_skb) {
		confirm_Dsend(dch);
		return 1;
	}
	dch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_dframe);

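/*
 * Queue a PH_DATA_CNF for the just transmitted B-channel frame, flushing
 * the receive queue first if it has grown beyond 64 frames.
 */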
void
confirm_Bsend(struct bchannel *bch)
{
	struct sk_buff *skb;

	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
	    0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
			mISDN_HEAD_ID(bch->tx_skb));
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(confirm_Bsend);

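/*
 * Move a pending next_skb to tx_skb if TX_NEXT is set. Returns 1 when a
 * new frame is ready (confirming it unless the channel is transparent),
 * otherwise 0 after clearing TX_BUSY.
 */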
int
get_next_bframe(struct bchannel *bch)
{
	bch->tx_idx = 0;
	if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
		bch->tx_skb = bch->next_skb;
		if (bch->tx_skb) {
			bch->next_skb = NULL;
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
				confirm_Bsend(bch); /* not for transparent */
			return 1;
		} else {
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			printk(KERN_WARNING "B TX_NEXT without skb\n");
		}
	}
	bch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_bframe);

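/*
 * Send a frame (or an empty indication if skb is NULL) to the channel
 * peer; the skb is freed if there is no peer or the peer rejects it.
 */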
void
queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
{
	struct mISDNhead *hh;

	if (!skb) {
		_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
	} else {
		if (ch->peer) {
			hh = mISDN_HEAD_P(skb);
			hh->prim = pr;
			hh->id = id;
			if (!ch->recv(ch->peer, skb))
				return;
		}
		dev_kfree_skb(skb);
	}
}
EXPORT_SYMBOL(queue_ch_frame);

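/*
 * Hand a D-channel frame to the hardware transmit path (caller holds the
 * HW lock). Returns 1 if the frame should be written to the FIFO now,
 * 0 if it was queued behind a transmission in progress, or a negative
 * errno for invalid sizes.
 */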
int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
	/* check oversize */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
			__func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		skb_queue_tail(&ch->squeue, skb);
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(dchannel_senddata);

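/*
 * Hand a B-channel frame to the hardware transmit path (caller holds the
 * HW lock). Returns 1 if the frame should be written to the FIFO now,
 * 0 if it was stored as next_skb, -EBUSY if a next_skb is already
 * pending, or -EINVAL for invalid sizes.
 */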
int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{

	/* check oversize */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
			__func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	/* check for pending next_skb */
	if (ch->next_skb) {
		printk(KERN_WARNING
		    "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
		    __func__, skb->len, ch->next_skb->len);
		return -EBUSY;
	}
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
		ch->next_skb = skb;
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(bchannel_senddata);