staging: ozwpan: Remove redundant null check before kfree in ozproto.c
[linux-2.6.git] / drivers / staging / ozwpan / ozproto.c
blob e00a53915daa6b1d9b79b721c7bb52685eec628d
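The change named in the subject relies on kfree(NULL) being a defined no-op, so a
NULL test that only guards a kfree() call adds nothing. A minimal sketch of the
pattern (illustrative only; 'ptr' is a placeholder, the actual hunks are not
reproduced on this page):

    if (ptr)        /* redundant: kfree() already ignores NULL */
        kfree(ptr);

reduces to:

    kfree(ptr);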
1 /* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
4 * -----------------------------------------------------------------------------
5 */
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/timer.h>
9 #include <linux/sched.h>
10 #include <linux/netdevice.h>
11 #include <linux/errno.h>
12 #include <linux/ieee80211.h>
13 #include "ozconfig.h"
14 #include "ozprotocol.h"
15 #include "ozeltbuf.h"
16 #include "ozpd.h"
17 #include "ozproto.h"
18 #include "ozusbsvc.h"
19 #include "oztrace.h"
20 #include "ozappif.h"
21 #include "ozevent.h"
22 #include <asm/unaligned.h>
23 #include <linux/uaccess.h>
24 #include <net/psnap.h>
25 /*------------------------------------------------------------------------------
26 */
27 #define OZ_CF_CONN_SUCCESS 1
28 #define OZ_CF_CONN_FAILURE 2
30 #define OZ_DO_STOP 1
31 #define OZ_DO_SLEEP 2
33 /* States of the timer.
34 */
35 #define OZ_TIMER_IDLE 0
36 #define OZ_TIMER_SET 1
37 #define OZ_TIMER_IN_HANDLER 2
39 #define OZ_MAX_TIMER_POOL_SIZE 16
41 /*------------------------------------------------------------------------------
42 */
43 struct oz_binding {
44 struct packet_type ptype;
45 char name[OZ_MAX_BINDING_LEN];
46 struct oz_binding *next;
47 };
49 struct oz_timer {
50 struct list_head link;
51 struct oz_pd *pd;
52 unsigned long due_time;
53 int type;
54 };
55 /*------------------------------------------------------------------------------
56 * Static external variables.
57 */
58 static DEFINE_SPINLOCK(g_polling_lock);
59 static LIST_HEAD(g_pd_list);
60 static struct oz_binding *g_binding;
61 static DEFINE_SPINLOCK(g_binding_lock);
62 static struct sk_buff_head g_rx_queue;
63 static u8 g_session_id;
64 static u16 g_apps = 0x1;
65 static int g_processing_rx;
66 static struct timer_list g_timer;
67 static struct oz_timer *g_cur_timer;
68 static struct list_head *g_timer_pool;
69 static int g_timer_pool_count;
70 static int g_timer_state = OZ_TIMER_IDLE;
71 static LIST_HEAD(g_timer_list);
72 /*------------------------------------------------------------------------------
73 */
74 static void oz_protocol_timer_start(void);
75 /*------------------------------------------------------------------------------
76 * Context: softirq-serialized
77 */
78 static u8 oz_get_new_session_id(u8 exclude)
79 {
80 if (++g_session_id == 0)
81 g_session_id = 1;
82 if (g_session_id == exclude) {
83 if (++g_session_id == 0)
84 g_session_id = 1;
85 }
86 return g_session_id;
87 }
88 /*------------------------------------------------------------------------------
89 * Context: softirq-serialized
90 */
91 static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
92 {
93 struct sk_buff *skb;
94 struct net_device *dev = pd->net_dev;
95 struct oz_hdr *oz_hdr;
96 struct oz_elt *elt;
97 struct oz_elt_connect_rsp *body;
98 int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
99 sizeof(struct oz_elt_connect_rsp);
100 skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
101 if (skb == 0)
102 return;
103 skb_reserve(skb, LL_RESERVED_SPACE(dev));
104 skb_reset_network_header(skb);
105 oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
106 elt = (struct oz_elt *)(oz_hdr+1);
107 body = (struct oz_elt_connect_rsp *)(elt+1);
108 skb->dev = dev;
109 skb->protocol = htons(OZ_ETHERTYPE);
110 /* Fill in device header */
111 if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
112 dev->dev_addr, skb->len) < 0) {
113 kfree_skb(skb);
114 return;
115 }
116 oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
117 oz_hdr->last_pkt_num = 0;
118 put_unaligned(0, &oz_hdr->pkt_num);
119 oz_event_log(OZ_EVT_CONNECT_RSP, 0, 0, 0, 0);
120 elt->type = OZ_ELT_CONNECT_RSP;
121 elt->length = sizeof(struct oz_elt_connect_rsp);
122 memset(body, 0, sizeof(struct oz_elt_connect_rsp));
123 body->status = status;
124 if (status == 0) {
125 body->mode = pd->mode;
126 body->session_id = pd->session_id;
127 put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
128 }
129 oz_trace("TX: OZ_ELT_CONNECT_RSP %d", status);
130 dev_queue_xmit(skb);
131 return;
132 }
133 /*------------------------------------------------------------------------------
134 * Context: softirq-serialized
135 */
136 static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
137 {
138 unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;
140 switch (kalive & OZ_KALIVE_TYPE_MASK) {
141 case OZ_KALIVE_SPECIAL:
142 pd->keep_alive_j =
143 oz_ms_to_jiffies(keep_alive * 1000*60*60*24*20);
144 break;
145 case OZ_KALIVE_SECS:
146 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000);
147 break;
148 case OZ_KALIVE_MINS:
149 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60);
150 break;
151 case OZ_KALIVE_HOURS:
152 pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60*60);
153 break;
154 default:
155 pd->keep_alive_j = 0;
156 }
157 oz_trace("Keepalive = %lu jiffies\n", pd->keep_alive_j);
158 }
159 /*------------------------------------------------------------------------------
160 * Context: softirq-serialized
161 */
162 static void pd_set_presleep(struct oz_pd *pd, u8 presleep)
163 {
164 if (presleep)
165 pd->presleep_j = oz_ms_to_jiffies(presleep*100);
166 else
167 pd->presleep_j = OZ_PRESLEEP_TOUT_J;
168 oz_trace("Presleep time = %lu jiffies\n", pd->presleep_j);
169 }
170 /*------------------------------------------------------------------------------
171 * Context: softirq-serialized
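* Handles an OZ_ELT_CONNECT_REQ element: finds or allocates the PD for the
* source MAC address, applies the negotiated parameters and session-id rules,
* then sends an OZ_ELT_CONNECT_RSP carrying the resulting status.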
172 */
173 static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
174 u8 *pd_addr, struct net_device *net_dev)
175 {
176 struct oz_pd *pd;
177 struct oz_elt_connect_req *body =
178 (struct oz_elt_connect_req *)(elt+1);
179 u8 rsp_status = OZ_STATUS_SUCCESS;
180 u8 stop_needed = 0;
181 u16 new_apps = g_apps;
182 struct net_device *old_net_dev = 0;
183 struct oz_pd *free_pd = 0;
184 if (cur_pd) {
185 pd = cur_pd;
186 spin_lock_bh(&g_polling_lock);
187 } else {
188 struct oz_pd *pd2 = 0;
189 struct list_head *e;
190 pd = oz_pd_alloc(pd_addr);
191 if (pd == 0)
192 return 0;
193 pd->last_rx_time_j = jiffies;
194 spin_lock_bh(&g_polling_lock);
195 list_for_each(e, &g_pd_list) {
196 pd2 = container_of(e, struct oz_pd, link);
197 if (memcmp(pd2->mac_addr, pd_addr, ETH_ALEN) == 0) {
198 free_pd = pd;
199 pd = pd2;
200 break;
201 }
202 }
203 if (pd != pd2)
204 list_add_tail(&pd->link, &g_pd_list);
205 }
206 if (pd == 0) {
207 spin_unlock_bh(&g_polling_lock);
208 return 0;
209 }
210 if (pd->net_dev != net_dev) {
211 old_net_dev = pd->net_dev;
212 dev_hold(net_dev);
213 pd->net_dev = net_dev;
214 }
215 oz_trace("Host vendor: %d\n", body->host_vendor);
216 pd->max_tx_size = OZ_MAX_TX_SIZE;
217 pd->mode = body->mode;
218 pd->pd_info = body->pd_info;
219 if (pd->mode & OZ_F_ISOC_NO_ELTS) {
220 pd->ms_per_isoc = body->ms_per_isoc;
221 if (!pd->ms_per_isoc)
222 pd->ms_per_isoc = 4;
224 switch (body->ms_isoc_latency & OZ_LATENCY_MASK) {
225 case OZ_ONE_MS_LATENCY:
226 pd->isoc_latency = (body->ms_isoc_latency &
227 ~OZ_LATENCY_MASK) / pd->ms_per_isoc;
228 break;
229 case OZ_TEN_MS_LATENCY:
230 pd->isoc_latency = ((body->ms_isoc_latency &
231 ~OZ_LATENCY_MASK) * 10) / pd->ms_per_isoc;
232 break;
233 default:
234 pd->isoc_latency = OZ_MAX_TX_QUEUE_ISOC;
235 }
236 }
237 if (body->max_len_div16)
238 pd->max_tx_size = ((u16)body->max_len_div16)<<4;
239 oz_trace("Max frame:%u Ms per isoc:%u\n",
240 pd->max_tx_size, pd->ms_per_isoc);
241 pd->max_stream_buffering = 3*1024;
242 pd->timeout_time_j = jiffies + OZ_CONNECTION_TOUT_J;
243 pd->pulse_period_j = OZ_QUANTUM_J;
244 pd_set_presleep(pd, body->presleep);
245 pd_set_keepalive(pd, body->keep_alive);
247 new_apps &= le16_to_cpu(get_unaligned(&body->apps));
248 if ((new_apps & 0x1) && (body->session_id)) {
249 if (pd->session_id) {
250 if (pd->session_id != body->session_id) {
251 rsp_status = OZ_STATUS_SESSION_MISMATCH;
252 goto done;
253 }
254 } else {
255 new_apps &= ~0x1; /* Resume not permitted */
256 pd->session_id =
257 oz_get_new_session_id(body->session_id);
258 }
259 } else {
260 if (pd->session_id && !body->session_id) {
261 rsp_status = OZ_STATUS_SESSION_TEARDOWN;
262 stop_needed = 1;
263 } else {
264 new_apps &= ~0x1; /* Resume not permitted */
265 pd->session_id =
266 oz_get_new_session_id(body->session_id);
267 }
268 }
269 done:
270 if (rsp_status == OZ_STATUS_SUCCESS) {
271 u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
272 u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
273 u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
274 spin_unlock_bh(&g_polling_lock);
275 oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
276 oz_timer_delete(pd, OZ_TIMER_STOP);
277 oz_trace("new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
278 new_apps, pd->total_apps, pd->paused_apps);
279 if (start_apps) {
280 if (oz_services_start(pd, start_apps, 0))
281 rsp_status = OZ_STATUS_TOO_MANY_PDS;
282 }
283 if (resume_apps)
284 if (oz_services_start(pd, resume_apps, 1))
285 rsp_status = OZ_STATUS_TOO_MANY_PDS;
286 if (stop_apps)
287 oz_services_stop(pd, stop_apps, 0);
288 oz_pd_request_heartbeat(pd);
289 } else {
290 spin_unlock_bh(&g_polling_lock);
291 }
292 oz_send_conn_rsp(pd, rsp_status);
293 if (rsp_status != OZ_STATUS_SUCCESS) {
294 if (stop_needed)
295 oz_pd_stop(pd);
296 oz_pd_put(pd);
297 pd = 0;
298 }
299 if (old_net_dev)
300 dev_put(old_net_dev);
301 if (free_pd)
302 oz_pd_destroy(free_pd);
303 return pd;
304 }
305 /*------------------------------------------------------------------------------
306 * Context: softirq-serialized
307 */
308 static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
309 u8 *report, u8 len)
310 {
311 struct oz_farewell *f;
312 struct oz_farewell *f2;
313 int found = 0;
314 f = kmalloc(sizeof(struct oz_farewell) + len - 1, GFP_ATOMIC);
315 if (!f)
316 return;
317 f->ep_num = ep_num;
318 f->index = index;
319 memcpy(f->report, report, len);
320 oz_trace("RX: Adding farewell report\n");
321 spin_lock(&g_polling_lock);
322 list_for_each_entry(f2, &pd->farewell_list, link) {
323 if ((f2->ep_num == ep_num) && (f2->index == index)) {
324 found = 1;
325 list_del(&f2->link);
326 break;
327 }
328 }
329 list_add_tail(&f->link, &pd->farewell_list);
330 spin_unlock(&g_polling_lock);
331 if (found)
332 kfree(f2);
333 }
334 /*------------------------------------------------------------------------------
335 * Context: softirq-serialized
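* Parses one received frame: checks the protocol version, looks up the PD by
* source MAC address, performs trigger/ACK bookkeeping, then walks the element
* chain and dispatches each element by type.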
336 */
337 static void oz_rx_frame(struct sk_buff *skb)
338 {
339 u8 *mac_hdr;
340 u8 *src_addr;
341 struct oz_elt *elt;
342 int length;
343 struct oz_pd *pd = 0;
344 struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
345 int dup = 0;
346 u32 pkt_num;
348 oz_event_log(OZ_EVT_RX_PROCESS, 0,
349 (((u16)oz_hdr->control)<<8)|oz_hdr->last_pkt_num,
350 0, oz_hdr->pkt_num);
351 oz_trace2(OZ_TRACE_RX_FRAMES,
352 "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
353 oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
354 mac_hdr = skb_mac_header(skb);
355 src_addr = &mac_hdr[ETH_ALEN];
356 length = skb->len;
358 /* Check the version field */
359 if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
360 oz_trace("Incorrect protocol version: %d\n",
361 oz_get_prot_ver(oz_hdr->control));
362 goto done;
363 }
365 pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));
367 pd = oz_pd_find(src_addr);
368 if (pd) {
369 pd->last_rx_time_j = jiffies;
370 oz_timer_add(pd, OZ_TIMER_TOUT,
371 pd->last_rx_time_j + pd->presleep_j, 1);
372 if (pkt_num != pd->last_rx_pkt_num) {
373 pd->last_rx_pkt_num = pkt_num;
374 } else {
375 dup = 1;
376 oz_trace("Duplicate frame\n");
377 }
378 }
380 if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
381 oz_trace2(OZ_TRACE_RX_FRAMES, "Received TRIGGER Frame\n");
382 pd->last_sent_frame = &pd->tx_queue;
383 if (oz_hdr->control & OZ_F_ACK) {
384 /* Retire completed frames */
385 oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
386 }
387 if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
388 (pd->state == OZ_PD_S_CONNECTED)) {
389 int backlog = pd->nb_queued_frames;
390 pd->trigger_pkt_num = pkt_num;
391 /* Send queued frames */
392 oz_send_queued_frames(pd, backlog);
393 }
394 }
396 length -= sizeof(struct oz_hdr);
397 elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));
399 while (length >= sizeof(struct oz_elt)) {
400 length -= sizeof(struct oz_elt) + elt->length;
401 if (length < 0)
402 break;
403 switch (elt->type) {
404 case OZ_ELT_CONNECT_REQ:
405 oz_event_log(OZ_EVT_CONNECT_REQ, 0, 0, 0, 0);
406 oz_trace("RX: OZ_ELT_CONNECT_REQ\n");
407 pd = oz_connect_req(pd, elt, src_addr, skb->dev);
408 break;
409 case OZ_ELT_DISCONNECT:
410 oz_trace("RX: OZ_ELT_DISCONNECT\n");
411 if (pd)
412 oz_pd_sleep(pd);
413 break;
414 case OZ_ELT_UPDATE_PARAM_REQ: {
415 struct oz_elt_update_param *body =
416 (struct oz_elt_update_param *)(elt + 1);
417 oz_trace("RX: OZ_ELT_UPDATE_PARAM_REQ\n");
418 if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
419 spin_lock(&g_polling_lock);
420 pd_set_keepalive(pd, body->keepalive);
421 pd_set_presleep(pd, body->presleep);
422 spin_unlock(&g_polling_lock);
423 }
424 }
425 break;
426 case OZ_ELT_FAREWELL_REQ: {
427 struct oz_elt_farewell *body =
428 (struct oz_elt_farewell *)(elt + 1);
429 oz_trace("RX: OZ_ELT_FAREWELL_REQ\n");
430 oz_add_farewell(pd, body->ep_num,
431 body->index, body->report,
432 elt->length + 1 - sizeof(*body));
433 }
434 break;
435 case OZ_ELT_APP_DATA:
436 if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
437 struct oz_app_hdr *app_hdr =
438 (struct oz_app_hdr *)(elt+1);
439 if (dup)
440 break;
441 oz_handle_app_elt(pd, app_hdr->app_id, elt);
442 }
443 break;
444 default:
445 oz_trace("RX: Unknown elt %02x\n", elt->type);
446 }
447 elt = oz_next_elt(elt);
448 }
449 done:
450 if (pd)
451 oz_pd_put(pd);
452 consume_skb(skb);
453 }
454 /*------------------------------------------------------------------------------
455 * Context: process
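* Protocol teardown: stops the kernel timer, unregisters and frees all
* bindings, stops every PD on g_pd_list and frees the pooled timer entries.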
456 */
457 void oz_protocol_term(void)
458 {
459 struct list_head *chain = 0;
460 del_timer_sync(&g_timer);
461 /* Walk the list of bindings and remove each one.
462 */
463 spin_lock_bh(&g_binding_lock);
464 while (g_binding) {
465 struct oz_binding *b = g_binding;
466 g_binding = b->next;
467 spin_unlock_bh(&g_binding_lock);
468 dev_remove_pack(&b->ptype);
469 if (b->ptype.dev)
470 dev_put(b->ptype.dev);
471 kfree(b);
472 spin_lock_bh(&g_binding_lock);
473 }
474 spin_unlock_bh(&g_binding_lock);
475 /* Walk the list of PDs and stop each one. This causes the PD to be
476 * removed from the list so we can just pull each one from the head
477 * of the list.
478 */
479 spin_lock_bh(&g_polling_lock);
480 while (!list_empty(&g_pd_list)) {
481 struct oz_pd *pd =
482 list_first_entry(&g_pd_list, struct oz_pd, link);
483 oz_pd_get(pd);
484 spin_unlock_bh(&g_polling_lock);
485 oz_pd_stop(pd);
486 oz_pd_put(pd);
487 spin_lock_bh(&g_polling_lock);
488 }
489 chain = g_timer_pool;
490 g_timer_pool = 0;
491 spin_unlock_bh(&g_polling_lock);
492 while (chain) {
493 struct oz_timer *t = container_of(chain, struct oz_timer, link);
494 chain = chain->next;
495 kfree(t);
496 }
497 oz_trace("Protocol stopped\n");
498 }
499 /*------------------------------------------------------------------------------
500 * Context: softirq
501 */
502 static void oz_pd_handle_timer(struct oz_pd *pd, int type)
503 {
504 switch (type) {
505 case OZ_TIMER_TOUT:
506 oz_pd_sleep(pd);
507 break;
508 case OZ_TIMER_STOP:
509 oz_pd_stop(pd);
510 break;
511 case OZ_TIMER_HEARTBEAT: {
512 u16 apps = 0;
513 spin_lock_bh(&g_polling_lock);
514 pd->heartbeat_requested = 0;
515 if (pd->state & OZ_PD_S_CONNECTED)
516 apps = pd->total_apps;
517 spin_unlock_bh(&g_polling_lock);
518 if (apps)
519 oz_pd_heartbeat(pd, apps);
520 }
521 break;
522 }
523 }
524 /*------------------------------------------------------------------------------
525 * Context: softirq
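* Timer callback: handles the expired entry (g_cur_timer), returns the
* oz_timer struct to the pool (or frees it), keeps servicing any further
* entries that are already due, then re-arms via oz_protocol_timer_start().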
526 */
527 static void oz_protocol_timer(unsigned long arg)
528 {
529 struct oz_timer *t;
530 struct oz_timer *t2;
531 struct oz_pd *pd;
532 spin_lock_bh(&g_polling_lock);
533 if (!g_cur_timer) {
534 /* This happens if we remove the current timer but can't stop
535 * the timer from firing. In this case just get out.
536 */
537 oz_event_log(OZ_EVT_TIMER, 0, 0, 0, 0);
538 spin_unlock_bh(&g_polling_lock);
539 return;
540 }
541 g_timer_state = OZ_TIMER_IN_HANDLER;
542 t = g_cur_timer;
543 g_cur_timer = 0;
544 list_del(&t->link);
545 spin_unlock_bh(&g_polling_lock);
546 do {
547 pd = t->pd;
548 oz_event_log(OZ_EVT_TIMER, 0, t->type, 0, 0);
549 oz_pd_handle_timer(pd, t->type);
550 spin_lock_bh(&g_polling_lock);
551 if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
552 t->link.next = g_timer_pool;
553 g_timer_pool = &t->link;
554 g_timer_pool_count++;
555 t = 0;
556 }
557 if (!list_empty(&g_timer_list)) {
558 t2 = container_of(g_timer_list.next,
559 struct oz_timer, link);
560 if (time_before_eq(t2->due_time, jiffies))
561 list_del(&t2->link);
562 else
563 t2 = 0;
564 } else {
565 t2 = 0;
566 }
567 spin_unlock_bh(&g_polling_lock);
568 oz_pd_put(pd);
569 kfree(t);
570 t = t2;
571 } while (t);
572 g_timer_state = OZ_TIMER_IDLE;
573 oz_protocol_timer_start();
574 }
575 /*------------------------------------------------------------------------------
576 * Context: softirq
577 */
578 static void oz_protocol_timer_start(void)
579 {
580 spin_lock_bh(&g_polling_lock);
581 if (!list_empty(&g_timer_list)) {
582 g_cur_timer =
583 container_of(g_timer_list.next, struct oz_timer, link);
584 if (g_timer_state == OZ_TIMER_SET) {
585 oz_event_log(OZ_EVT_TIMER_CTRL, 3,
586 (u16)g_cur_timer->type, 0,
587 (unsigned)g_cur_timer->due_time);
588 mod_timer(&g_timer, g_cur_timer->due_time);
589 } else {
590 oz_event_log(OZ_EVT_TIMER_CTRL, 4,
591 (u16)g_cur_timer->type, 0,
592 (unsigned)g_cur_timer->due_time);
593 g_timer.expires = g_cur_timer->due_time;
594 g_timer.function = oz_protocol_timer;
595 g_timer.data = 0;
596 add_timer(&g_timer);
597 }
598 g_timer_state = OZ_TIMER_SET;
599 } else {
600 oz_trace("No queued timers\n");
601 }
602 spin_unlock_bh(&g_polling_lock);
603 }
604 /*------------------------------------------------------------------------------
605 * Context: softirq or process
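* Queues a timer event of the given type for the PD, keeping g_timer_list
* ordered by due time. When 'remove' is set, an existing entry of the same
* type for this PD is taken off the list and reused. The kernel timer is
* re-armed if the earliest entry changed and the handler is not running.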
606 */
607 void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
608 int remove)
609 {
610 struct list_head *e;
611 struct oz_timer *t = 0;
612 int restart_needed = 0;
613 oz_event_log(OZ_EVT_TIMER_CTRL, 1, (u16)type, 0, (unsigned)due_time);
614 spin_lock(&g_polling_lock);
615 if (remove) {
616 list_for_each(e, &g_timer_list) {
617 t = container_of(e, struct oz_timer, link);
618 if ((t->pd == pd) && (t->type == type)) {
619 if (g_cur_timer == t) {
620 restart_needed = 1;
621 g_cur_timer = 0;
622 }
623 list_del(e);
624 break;
625 }
626 t = 0;
627 }
628 }
629 if (!t) {
630 if (g_timer_pool) {
631 t = container_of(g_timer_pool, struct oz_timer, link);
632 g_timer_pool = g_timer_pool->next;
633 g_timer_pool_count--;
634 } else {
635 t = kmalloc(sizeof(struct oz_timer), GFP_ATOMIC);
636 }
637 if (t) {
638 t->pd = pd;
639 t->type = type;
640 oz_pd_get(pd);
641 }
642 }
643 if (t) {
644 struct oz_timer *t2;
645 t->due_time = due_time;
646 list_for_each(e, &g_timer_list) {
647 t2 = container_of(e, struct oz_timer, link);
648 if (time_before(due_time, t2->due_time)) {
649 if (t2 == g_cur_timer) {
650 g_cur_timer = 0;
651 restart_needed = 1;
652 }
653 break;
654 }
655 }
656 list_add_tail(&t->link, e);
657 }
658 if (g_timer_state == OZ_TIMER_IDLE)
659 restart_needed = 1;
660 else if (g_timer_state == OZ_TIMER_IN_HANDLER)
661 restart_needed = 0;
662 spin_unlock(&g_polling_lock);
663 if (restart_needed)
664 oz_protocol_timer_start();
665 }
666 /*------------------------------------------------------------------------------
667 * Context: softirq or process
668 */
669 void oz_timer_delete(struct oz_pd *pd, int type)
670 {
671 struct list_head *chain = 0;
672 struct oz_timer *t;
673 struct oz_timer *n;
674 int restart_needed = 0;
675 int release = 0;
676 oz_event_log(OZ_EVT_TIMER_CTRL, 2, (u16)type, 0, 0);
677 spin_lock(&g_polling_lock);
678 list_for_each_entry_safe(t, n, &g_timer_list, link) {
679 if ((t->pd == pd) && ((type == 0) || (t->type == type))) {
680 if (g_cur_timer == t) {
681 restart_needed = 1;
682 g_cur_timer = 0;
683 del_timer(&g_timer);
684 }
685 list_del(&t->link);
686 release++;
687 if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
688 t->link.next = g_timer_pool;
689 g_timer_pool = &t->link;
690 g_timer_pool_count++;
691 } else {
692 t->link.next = chain;
693 chain = &t->link;
694 }
695 if (type)
696 break;
697 }
698 }
699 if (g_timer_state == OZ_TIMER_IN_HANDLER)
700 restart_needed = 0;
701 else if (restart_needed)
702 g_timer_state = OZ_TIMER_IDLE;
703 spin_unlock(&g_polling_lock);
704 if (restart_needed)
705 oz_protocol_timer_start();
706 while (release--)
707 oz_pd_put(pd);
708 while (chain) {
709 t = container_of(chain, struct oz_timer, link);
710 chain = chain->next;
711 kfree(t);
712 }
713 }
714 /*------------------------------------------------------------------------------
715 * Context: softirq or process
716 */
717 void oz_pd_request_heartbeat(struct oz_pd *pd)
718 {
719 unsigned long now = jiffies;
720 unsigned long t;
721 spin_lock(&g_polling_lock);
722 if (pd->heartbeat_requested) {
723 spin_unlock(&g_polling_lock);
724 return;
725 }
726 if (pd->pulse_period_j)
727 t = ((now / pd->pulse_period_j) + 1) * pd->pulse_period_j;
728 else
729 t = now + 1;
730 pd->heartbeat_requested = 1;
731 spin_unlock(&g_polling_lock);
732 oz_timer_add(pd, OZ_TIMER_HEARTBEAT, t, 0);
733 }
734 /*------------------------------------------------------------------------------
735 * Context: softirq or process
736 */
737 struct oz_pd *oz_pd_find(u8 *mac_addr)
738 {
739 struct oz_pd *pd;
740 struct list_head *e;
741 spin_lock_bh(&g_polling_lock);
742 list_for_each(e, &g_pd_list) {
743 pd = container_of(e, struct oz_pd, link);
744 if (memcmp(pd->mac_addr, mac_addr, ETH_ALEN) == 0) {
745 atomic_inc(&pd->ref_count);
746 spin_unlock_bh(&g_polling_lock);
747 return pd;
748 }
749 }
750 spin_unlock_bh(&g_polling_lock);
751 return 0;
752 }
753 /*------------------------------------------------------------------------------
754 * Context: process
755 */
756 void oz_app_enable(int app_id, int enable)
757 {
758 if (app_id <= OZ_APPID_MAX) {
759 spin_lock_bh(&g_polling_lock);
760 if (enable)
761 g_apps |= (1<<app_id);
762 else
763 g_apps &= ~(1<<app_id);
764 spin_unlock_bh(&g_polling_lock);
765 }
766 }
767 /*------------------------------------------------------------------------------
768 * Context: softirq
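* Packet-type receive handler: frames are processed inline unless another
* frame is already being handled, in which case they are queued on g_rx_queue
* and drained by the thread that set g_processing_rx.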
769 */
770 static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
771 struct packet_type *pt, struct net_device *orig_dev)
772 {
773 oz_event_log(OZ_EVT_RX_FRAME, 0, 0, 0, 0);
774 skb = skb_share_check(skb, GFP_ATOMIC);
775 if (skb == 0)
776 return 0;
777 spin_lock_bh(&g_rx_queue.lock);
778 if (g_processing_rx) {
779 /* We already hold the lock so use __ variant.
780 */
781 __skb_queue_head(&g_rx_queue, skb);
782 spin_unlock_bh(&g_rx_queue.lock);
783 } else {
784 g_processing_rx = 1;
785 do {
787 spin_unlock_bh(&g_rx_queue.lock);
788 oz_rx_frame(skb);
789 spin_lock_bh(&g_rx_queue.lock);
790 if (skb_queue_empty(&g_rx_queue)) {
791 g_processing_rx = 0;
792 spin_unlock_bh(&g_rx_queue.lock);
793 break;
794 }
795 /* We already hold the lock so use __ variant.
796 */
797 skb = __skb_dequeue(&g_rx_queue);
798 } while (1);
799 }
800 return 0;
801 }
802 /*------------------------------------------------------------------------------
803 * Context: process
804 */
805 void oz_binding_add(char *net_dev)
806 {
807 struct oz_binding *binding;
809 binding = kmalloc(sizeof(struct oz_binding), GFP_KERNEL);
810 if (binding) {
811 binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
812 binding->ptype.func = oz_pkt_recv;
813 memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
814 if (net_dev && *net_dev) {
815 oz_trace("Adding binding: %s\n", net_dev);
816 binding->ptype.dev =
817 dev_get_by_name(&init_net, net_dev);
818 if (binding->ptype.dev == 0) {
819 oz_trace("Netdev %s not found\n", net_dev);
820 kfree(binding);
821 binding = 0;
822 }
823 } else {
824 oz_trace("Binding to all netcards\n");
825 binding->ptype.dev = 0;
826 }
827 if (binding) {
828 dev_add_pack(&binding->ptype);
829 spin_lock_bh(&g_binding_lock);
830 binding->next = g_binding;
831 g_binding = binding;
832 spin_unlock_bh(&g_binding_lock);
833 }
834 }
835 }
836 /*------------------------------------------------------------------------------
837 * Context: process
838 */
839 static int compare_binding_name(char *s1, char *s2)
840 {
841 int i;
842 for (i = 0; i < OZ_MAX_BINDING_LEN; i++) {
843 if (*s1 != *s2)
844 return 0;
845 if (!*s1++)
846 return 1;
847 s2++;
848 }
849 return 1;
850 }
851 /*------------------------------------------------------------------------------
852 * Context: process
853 */
854 static void pd_stop_all_for_device(struct net_device *net_dev)
855 {
856 struct list_head h;
857 struct oz_pd *pd;
858 struct oz_pd *n;
859 INIT_LIST_HEAD(&h);
860 spin_lock_bh(&g_polling_lock);
861 list_for_each_entry_safe(pd, n, &g_pd_list, link) {
862 if (pd->net_dev == net_dev) {
863 list_move(&pd->link, &h);
864 oz_pd_get(pd);
865 }
866 }
867 spin_unlock_bh(&g_polling_lock);
868 while (!list_empty(&h)) {
869 pd = list_first_entry(&h, struct oz_pd, link);
870 oz_pd_stop(pd);
871 oz_pd_put(pd);
872 }
873 }
874 /*------------------------------------------------------------------------------
875 * Context: process
876 */
877 void oz_binding_remove(char *net_dev)
878 {
879 struct oz_binding *binding = 0;
880 struct oz_binding **link;
881 oz_trace("Removing binding: %s\n", net_dev);
882 spin_lock_bh(&g_binding_lock);
883 binding = g_binding;
884 link = &g_binding;
885 while (binding) {
886 if (compare_binding_name(binding->name, net_dev)) {
887 oz_trace("Binding '%s' found\n", net_dev);
888 *link = binding->next;
889 break;
890 } else {
891 link = &binding;
892 binding = binding->next;
893 }
894 }
895 spin_unlock_bh(&g_binding_lock);
896 if (binding) {
897 dev_remove_pack(&binding->ptype);
898 if (binding->ptype.dev) {
899 dev_put(binding->ptype.dev);
900 pd_stop_all_for_device(binding->ptype.dev);
901 }
902 kfree(binding);
903 }
904 }
905 /*------------------------------------------------------------------------------
906 * Context: process
907 */
908 static char *oz_get_next_device_name(char *s, char *dname, int max_size)
909 {
910 while (*s == ',')
911 s++;
912 while (*s && (*s != ',') && max_size > 1) {
913 *dname++ = *s++;
914 max_size--;
915 }
916 *dname = 0;
917 return s;
918 }
919 /*------------------------------------------------------------------------------
920 * Context: process
921 */
922 int oz_protocol_init(char *devs)
923 {
924 skb_queue_head_init(&g_rx_queue);
925 if (devs && (devs[0] == '*')) {
926 oz_binding_add(0);
927 } else {
928 char d[32];
929 while (*devs) {
930 devs = oz_get_next_device_name(devs, d, sizeof(d));
931 if (d[0])
932 oz_binding_add(d);
933 }
934 }
935 init_timer(&g_timer);
936 return 0;
937 }
938 /*------------------------------------------------------------------------------
939 * Context: process
940 */
941 int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
942 {
943 struct oz_pd *pd;
944 struct list_head *e;
945 int count = 0;
946 spin_lock_bh(&g_polling_lock);
947 list_for_each(e, &g_pd_list) {
948 if (count >= max_count)
949 break;
950 pd = container_of(e, struct oz_pd, link);
951 memcpy(&addr[count++], pd->mac_addr, ETH_ALEN);
952 }
953 spin_unlock_bh(&g_polling_lock);
954 return count;
955 }
956 /*------------------------------------------------------------------------------
957 */
958 void oz_polling_lock_bh(void)
959 {
960 spin_lock_bh(&g_polling_lock);
961 }
962 /*------------------------------------------------------------------------------
963 */
964 void oz_polling_unlock_bh(void)
965 {
966 spin_unlock_bh(&g_polling_lock);
967 }