net/caif/caif_dev.c
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 * and Sakari Ailus <sakari.ailus@nokia.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");
/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};
struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};
struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static int q_high = 50; /* Percent */
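
/* Return the CAIF configuration object for a given network namespace. */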
struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);
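
/* Return the per-namespace list of registered CAIF devices. */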
static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}
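
/*
 * Lightweight per-CPU reference counting: caifd_hold()/caifd_put() only
 * touch the local CPU's counter, so they are cheap on the packet path.
 * caifd_refcnt_read() sums the counters over all possible CPUs and is
 * only used on the slow (unregister/teardown) path.
 */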
static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}
/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}
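
/* Look up the entry for a net_device; caller must hold rcu_read_lock(). */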
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
		caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}
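
/*
 * skb destructor installed by transmit() while flow is off. It runs when
 * the hijacked skb is finally consumed: it restores and calls the original
 * destructor, clears the xoff state and signals FLOW_ON up the stack.
 */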
void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (caifd == NULL) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = 0;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}
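
/*
 * Transmit a CAIF packet on the underlying net_device. If the device has
 * a TX queue and it is stopped or filled beyond q_high percent of
 * tx_queue_len, flow is turned off towards the CAIF stack until the
 * marker skb is consumed (see caif_flow_cb() above).
 */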
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
		container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->tx_queue_len == 0))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));

		if (likely(qlen == 0))
			goto noxoff;

		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (likely(qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor and replacing it with our own flow-on
	 * callback. The callback will set flow-on and call the
	 * original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
			netif_queue_stopped(caifd->netdev),
			qlen, high);
	caifd->xoff = 1;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}
/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}
static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};
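
/* Propagate a flow on/off indication from the driver up the CAIF stack. */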
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}
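
/*
 * Allocate a caif_device_entry for a net_device and register it as a
 * physical layer with the CAIF configuration, optionally stacking a
 * link-support (serial framing) layer on top of it.
 */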
void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer, int (**rcv_func)(
			     struct sk_buff *, struct net_device *,
			     struct packet_type *, struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strncpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name) - 1);
	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
	caifd->layer.transmit = transmit;
	cfcnfg_add_phy_layer(cfg,
			     dev,
			     &caifd->layer,
			     pref,
			     link_support,
			     caifdev->use_fcs,
			     head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);
/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *arg)
{
	struct net_device *dev = arg;
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
						     caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		caifdev->flowctrl = dev_flowctrl;
		break;
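
	/*
	 * NETDEV_UP: the interface is operational; clear any stale
	 * flow-off state and mark the physical layer as up.
	 */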
	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = 0;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor is *always* called before
		 * the skb reference becomes invalid. The hijacked SKB
		 * destructor takes the flow_lock, so manipulating the
		 * skb->destructor here should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = 0;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;
	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are taken, simply ignore NETDEV_UNREGISTER and wait
		 * for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF Stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF Stack associated
		 * with the net-device un-registering is freed.
		 */
		if (caifd_refcnt_read(caifd) != 0 ||
		    cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Enroll device if CAIF Stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}
static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};
/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}
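
/*
 * Namespace teardown: unhook every remaining CAIF device and free the
 * configuration. Retries up to 10 times, 250 ms apart, while references
 * are held or packets are still in flight, then frees the entry.
 */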
static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
		caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
		       (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}
static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id = &caif_net_id,
	.size = sizeof(struct caif_net),
};
/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}
static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);