staging: unisys: add error messages to visornic
[linux-2.6/btrfs-unstable.git] drivers/staging/unisys/visornic/visornic_main.c
1 /* Copyright (c) 2012 - 2015 UNISYS CORPORATION
2 * All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or (at
7 * your option) any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more
13 * details.
16 /* This driver lives in a spar partition, and registers to ethernet io
17 * channels from the visorbus driver. It creates netdev devices and
18 * forwards transmit to the IO channel and accepts rcvs from the IO
19 * Partition via the IO channel.
22 #include <linux/debugfs.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/kthread.h>
28 #include "visorbus.h"
29 #include "iochannel.h"
31 #define VISORNIC_INFINITE_RESPONSE_WAIT 0
32 #define VISORNICSOPENMAX 32
33 #define MAXDEVICES 16384
35 /* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
36 * = 163840 bytes
38 #define MAX_BUF 163840
40 static spinlock_t dev_num_pool_lock;
41 static void *dev_num_pool; /**< pool to grab device numbers from */
43 static int visornic_probe(struct visor_device *dev);
44 static void visornic_remove(struct visor_device *dev);
45 static int visornic_pause(struct visor_device *dev,
46 visorbus_state_complete_func complete_func);
47 static int visornic_resume(struct visor_device *dev,
48 visorbus_state_complete_func complete_func);
50 /* DEBUGFS declarations */
51 static ssize_t info_debugfs_read(struct file *file, char __user *buf,
52 size_t len, loff_t *offset);
53 static ssize_t enable_ints_write(struct file *file, const char __user *buf,
54 size_t len, loff_t *ppos);
55 static struct dentry *visornic_debugfs_dir;
56 static const struct file_operations debugfs_info_fops = {
57 .read = info_debugfs_read,
60 static const struct file_operations debugfs_enable_ints_fops = {
61 .write = enable_ints_write,
64 static struct workqueue_struct *visornic_serverdown_workqueue;
65 static struct workqueue_struct *visornic_timeout_reset_workqueue;
67 /* GUIDS for director channel type supported by this driver. */
68 static struct visor_channeltype_descriptor visornic_channel_types[] = {
69 /* Note that the only channel type we expect to be reported by the
70 * bus driver is the SPAR_VNIC channel.
72 { SPAR_VNIC_CHANNEL_PROTOCOL_UUID, "ultravnic" },
73 { NULL_UUID_LE, NULL }
76 /* This is used to tell the visor bus driver which types of visor devices
77 * we support, and what functions to call when a visor device that we support
78 * is attached or removed.
80 static struct visor_driver visornic_driver = {
81 .name = "visornic",
82 .version = "1.0.0.0",
83 .vertag = NULL,
84 .owner = THIS_MODULE,
85 .channel_types = visornic_channel_types,
86 .probe = visornic_probe,
87 .remove = visornic_remove,
88 .pause = visornic_pause,
89 .resume = visornic_resume,
90 .channel_interrupt = NULL,
93 struct visor_thread_info {
94 struct task_struct *task;
95 struct completion has_stopped;
96 int id;
99 struct chanstat {
100 unsigned long got_rcv;
101 unsigned long got_enbdisack;
102 unsigned long got_xmit_done;
103 unsigned long xmit_fail;
104 unsigned long sent_enbdis;
105 unsigned long sent_promisc;
106 unsigned long sent_post;
107 unsigned long sent_xmit;
108 unsigned long reject_count;
109 unsigned long extra_rcvbufs_sent;
112 struct visornic_devdata {
113 int devnum;
114 int thread_wait_ms;
115 unsigned short enabled; /* 0 disabled 1 enabled to receive */
116 unsigned short enab_dis_acked; /* NET_RCV_ENABLE/DISABLE acked by
117 * IOPART
119 struct visor_device *dev;
120 char name[99];
121 struct list_head list_all; /* < link within list_all_devices list */
122 struct kref kref;
123 struct net_device *netdev;
124 struct net_device_stats net_stats;
125 atomic_t interrupt_rcvd;
126 wait_queue_head_t rsp_queue;
127 struct sk_buff **rcvbuf;
128 u64 uniquenum; /* TODO figure out why not used */
129 unsigned short old_flags; /* flags as they were prior to
130 * set_multicast_list
132 atomic_t usage; /* count of users */
133 int num_rcv_bufs; /* indicates how many rcv buffers
134 * the vnic will post
136 int num_rcv_bufs_could_not_alloc;
137 atomic_t num_rcvbuf_in_iovm;
138 unsigned long alloc_failed_in_if_needed_cnt;
139 unsigned long alloc_failed_in_repost_rtn_cnt;
140 int max_outstanding_net_xmits; /* absolute max number of outstanding
141 * xmits - should never hit this
143 int upper_threshold_net_xmits; /* high water mark for calling
144 * netif_stop_queue()
146 int lower_threshold_net_xmits; /* high water mark for calling
147 * netif_wake_queue()
149 struct sk_buff_head xmitbufhead; /* xmitbufhead is the head of the
150 * xmit buffer list that have been
151 * sent to the IOPART end
153 struct work_struct serverdown_completion;
154 visorbus_state_complete_func server_down_complete_func;
155 struct work_struct timeout_reset;
156 struct uiscmdrsp *cmdrsp_rcv; /* cmdrsp_rcv is used for
157 * posting/unposting rcv buffers
159 struct uiscmdrsp *xmit_cmdrsp; /* used to issue NET_XMIT - there is
160 * never more that one xmit in
161 * progress at a time
163 bool server_down; /* IOPART is down */
164 bool server_change_state; /* Processing SERVER_CHANGESTATE msg */
165 struct dentry *eth_debugfs_dir;
166 struct visor_thread_info threadinfo;
167 u64 interrupts_rcvd;
168 u64 interrupts_notme;
169 u64 interrupts_disabled;
170 u64 busy_cnt;
171 spinlock_t priv_lock; /* spinlock to access devdata structures */
173 /* flow control counter */
174 u64 flow_control_upper_hits;
175 u64 flow_control_lower_hits;
177 /* debug counters */
178 unsigned long n_rcv0; /* # rcvs of 0 buffers */
179 unsigned long n_rcv1; /* # rcvs of 1 buffers */
180 unsigned long n_rcv2; /* # rcvs of 2 buffers */
181 unsigned long n_rcvx; /* # rcvs of >2 buffers */
182 unsigned long found_repost_rcvbuf_cnt; /* # times we called
183 * repost_rcvbuf_cnt
185 unsigned long repost_found_skb_cnt; /* # times found the skb */
186 unsigned long n_repost_deficit; /* # times we couldn't find
187 * all of the rcv buffers
189 unsigned long bad_rcv_buf; /* # times we neglected to
190 * free the rcv skb because
191 * we didn't know where it
192 * came from
194 unsigned long n_rcv_packets_not_accepted;/* # bogus rcv packets */
196 int queuefullmsg_logged;
197 struct chanstat chstat;
200 /* array of open devices maintained by open() and close() */
201 static struct net_device *num_visornic_open[VISORNICSOPENMAX];
203 /* List of all visornic_devdata structs,
204 * linked via the list_all member
206 static LIST_HEAD(list_all_devices);
207 static DEFINE_SPINLOCK(lock_all_devices);
210 * visor_copy_fragsinfo_from_skb(
211 * @skb_in: skbuff that we are pulling the frags from
212 * @firstfraglen: length of first fragment in skb
213 * @frags_max: max len of frags array
214 * @frags: frags array filled in on output
216 * Copy the fragment list in the SKB to a phys_info
217 * array that the IOPART understands.
218 * Return value indicates number of entries filled in frags
219 * Negative values indicate an error.
221 static unsigned int
222 visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
223 unsigned int frags_max,
224 struct phys_info frags[])
226 unsigned int count = 0, ii, size, offset = 0, numfrags;
228 numfrags = skb_shinfo(skb)->nr_frags;
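	/* First carve the linear (skb->data) region into phys_info entries,
	 * splitting at page boundaries since each entry is a single
	 * pfn/offset/length triple handed to the IO Partition.
	 */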
230 while (firstfraglen) {
231 if (count == frags_max)
232 return -EINVAL;
234 frags[count].pi_pfn =
235 page_to_pfn(virt_to_page(skb->data + offset));
236 frags[count].pi_off =
237 (unsigned long)(skb->data + offset) & PI_PAGE_MASK;
238 size = min_t(unsigned int, firstfraglen,
239 PI_PAGE_SIZE - frags[count].pi_off);
241 /* can take smallest of firstfraglen (what's left) OR
242 * bytes left in the page
244 frags[count].pi_len = size;
245 firstfraglen -= size;
246 offset += size;
247 count++;
249 if (numfrags) {
250 if ((count + numfrags) > frags_max)
251 return -EINVAL;
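		/* Append entries for the paged frags; add_physinfo_entries()
		 * returns the updated entry count, and a return of 0
		 * (presumably an overflow of the frags array) is treated as
		 * an error (-EIO) below.
		 */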
253 for (ii = 0; ii < numfrags; ii++) {
254 count = add_physinfo_entries(page_to_pfn(
255 skb_frag_page(&skb_shinfo(skb)->frags[ii])),
256 skb_shinfo(skb)->frags[ii].
257 page_offset,
258 skb_shinfo(skb)->frags[ii].
259 size, count, frags_max, frags);
260 if (!count)
261 return -EIO;
264 if (skb_shinfo(skb)->frag_list) {
265 struct sk_buff *skbinlist;
266 int c;
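		/* Walk any chained skbs on frag_list recursively, appending
		 * their fragment info after what has been recorded so far.
		 */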
268 for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
269 skbinlist = skbinlist->next) {
270 c = visor_copy_fragsinfo_from_skb(skbinlist,
271 skbinlist->len -
272 skbinlist->data_len,
273 frags_max - count,
274 &frags[count]);
275 if (c < 0)
276 return c;
277 count += c;
280 return count;
284 * visor_thread_start - starts thread for the device
285 * @thrinfo: The thread to start
286 * @threadfn: Function the thread starts
287 * @thrcontext: Context to pass to the thread, i.e. devdata
288 * @name: string describing name of thread
290 * Starts a thread for the device, currently only thread is
291 * process_incoming_rsps
292 * Returns 0 on success;
294 static int visor_thread_start(struct visor_thread_info *thrinfo,
295 int (*threadfn)(void *),
296 void *thrcontext, char *name)
298 /* used to stop the thread */
299 init_completion(&thrinfo->has_stopped);
300 thrinfo->task = kthread_run(threadfn, thrcontext, name);
301 if (IS_ERR(thrinfo->task)) {
302 pr_debug("%s failed (%ld)\n",
303 __func__, PTR_ERR(thrinfo->task));
304 thrinfo->id = 0;
305 return -EINVAL;
307 thrinfo->id = thrinfo->task->pid;
308 return 0;
312 * visor_thread_stop - stop a thread for the device
313 * @thrinfo: The thread to stop
315 * Stop the thread and wait for completion for a minute
316 * Returns void.
318 static void visor_thread_stop(struct visor_thread_info *thrinfo)
320 if (!thrinfo->id)
321 return; /* thread not running */
323 kthread_stop(thrinfo->task);
324 /* give up if the thread has NOT died in 1 minute */
325 if (wait_for_completion_timeout(&thrinfo->has_stopped, 60 * HZ))
326 thrinfo->id = 0;
329 /* DebugFS code */
330 static ssize_t info_debugfs_read(struct file *file, char __user *buf,
331 size_t len, loff_t *offset)
333 int i;
334 ssize_t bytes_read = 0;
335 int str_pos = 0;
336 struct visornic_devdata *devdata;
337 char *vbuf;
339 if (len > MAX_BUF)
340 len = MAX_BUF;
341 vbuf = kzalloc(len, GFP_KERNEL);
342 if (!vbuf)
343 return -ENOMEM;
345 /* for each vnic channel
346 * dump out channel specific data
348 for (i = 0; i < VISORNICSOPENMAX; i++) {
349 if (!num_visornic_open[i])
350 continue;
352 devdata = netdev_priv(num_visornic_open[i]);
353 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
354 "Vnic i = %d\n", i);
355 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
356 "netdev = %s (0x%p), MAC Addr %pM\n",
357 num_visornic_open[i]->name,
358 num_visornic_open[i],
359 num_visornic_open[i]->dev_addr);
360 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
361 "VisorNic Dev Info = 0x%p\n", devdata);
362 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
363 " num_rcv_bufs = %d\n",
364 devdata->num_rcv_bufs);
365 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
366 " max_oustanding_next_xmits = %d\n",
367 devdata->max_outstanding_net_xmits);
368 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
369 " upper_threshold_net_xmits = %d\n",
370 devdata->upper_threshold_net_xmits);
371 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
372 " lower_threshold_net_xmits = %d\n",
373 devdata->lower_threshold_net_xmits);
374 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
375 " queuefullmsg_logged = %d\n",
376 devdata->queuefullmsg_logged);
377 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
378 " chstat.got_rcv = %lu\n",
379 devdata->chstat.got_rcv);
380 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
381 " chstat.got_enbdisack = %lu\n",
382 devdata->chstat.got_enbdisack);
383 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
384 " chstat.got_xmit_done = %lu\n",
385 devdata->chstat.got_xmit_done);
386 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
387 " chstat.xmit_fail = %lu\n",
388 devdata->chstat.xmit_fail);
389 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
390 " chstat.sent_enbdis = %lu\n",
391 devdata->chstat.sent_enbdis);
392 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
393 " chstat.sent_promisc = %lu\n",
394 devdata->chstat.sent_promisc);
395 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
396 " chstat.sent_post = %lu\n",
397 devdata->chstat.sent_post);
398 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
399 " chstat.sent_xmit = %lu\n",
400 devdata->chstat.sent_xmit);
401 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
402 " chstat.reject_count = %lu\n",
403 devdata->chstat.reject_count);
404 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
405 " chstat.extra_rcvbufs_sent = %lu\n",
406 devdata->chstat.extra_rcvbufs_sent);
407 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
408 " n_rcv0 = %lu\n", devdata->n_rcv0);
409 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
410 " n_rcv1 = %lu\n", devdata->n_rcv1);
411 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
412 " n_rcv2 = %lu\n", devdata->n_rcv2);
413 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
414 " n_rcvx = %lu\n", devdata->n_rcvx);
415 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
416 " num_rcvbuf_in_iovm = %d\n",
417 atomic_read(&devdata->num_rcvbuf_in_iovm));
418 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
419 " alloc_failed_in_if_needed_cnt = %lu\n",
420 devdata->alloc_failed_in_if_needed_cnt);
421 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
422 " alloc_failed_in_repost_rtn_cnt = %lu\n",
423 devdata->alloc_failed_in_repost_rtn_cnt);
424 /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
425 * " inner_loop_limit_reached_cnt = %lu\n",
426 * devdata->inner_loop_limit_reached_cnt);
428 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
429 " found_repost_rcvbuf_cnt = %lu\n",
430 devdata->found_repost_rcvbuf_cnt);
431 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
432 " repost_found_skb_cnt = %lu\n",
433 devdata->repost_found_skb_cnt);
434 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
435 " n_repost_deficit = %lu\n",
436 devdata->n_repost_deficit);
437 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
438 " bad_rcv_buf = %lu\n",
439 devdata->bad_rcv_buf);
440 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
441 " n_rcv_packets_not_accepted = %lu\n",
442 devdata->n_rcv_packets_not_accepted);
443 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
444 " interrupts_rcvd = %llu\n",
445 devdata->interrupts_rcvd);
446 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
447 " interrupts_notme = %llu\n",
448 devdata->interrupts_notme);
449 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
450 " interrupts_disabled = %llu\n",
451 devdata->interrupts_disabled);
452 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
453 " busy_cnt = %llu\n",
454 devdata->busy_cnt);
455 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
456 " flow_control_upper_hits = %llu\n",
457 devdata->flow_control_upper_hits);
458 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
459 " flow_control_lower_hits = %llu\n",
460 devdata->flow_control_lower_hits);
461 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
462 " thread_wait_ms = %d\n",
463 devdata->thread_wait_ms);
464 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
465 " netif_queue = %s\n",
466 netif_queue_stopped(devdata->netdev) ?
467 "stopped" : "running");
469 bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
470 kfree(vbuf);
471 return bytes_read;
474 static ssize_t enable_ints_write(struct file *file,
475 const char __user *buffer,
476 size_t count, loff_t *ppos)
478 char buf[4];
479 int i, new_value;
480 struct visornic_devdata *devdata;
482 if (count >= ARRAY_SIZE(buf))
483 return -EINVAL;
485 buf[count] = '\0';
486 if (copy_from_user(buf, buffer, count))
487 return -EFAULT;
489 i = kstrtoint(buf, 10, &new_value);
490 if (i != 0)
491 return -EINVAL;
493 /* set all counts to new_value usually 0 */
494 for (i = 0; i < VISORNICSOPENMAX; i++) {
495 if (num_visornic_open[i]) {
496 devdata = netdev_priv(num_visornic_open[i]);
497 /* TODO update features bit in channel */
501 return count;
505 * visornic_serverdown_complete - IOPART went down, need to pause
506 * device
507 * @work: Work queue it was scheduled on
509 * The IO partition has gone down and we need to do some cleanup
510 * for when it comes back. Treat the IO partition as the link
511 * being down.
512 * Returns void.
514 static void
515 visornic_serverdown_complete(struct work_struct *work)
517 struct visornic_devdata *devdata;
518 struct net_device *netdev;
519 unsigned long flags;
520 int i = 0, count = 0;
522 devdata = container_of(work, struct visornic_devdata,
523 serverdown_completion);
524 netdev = devdata->netdev;
526 /* Stop using datachan */
527 visor_thread_stop(&devdata->threadinfo);
529 /* Inform Linux that the link is down */
530 netif_carrier_off(netdev);
531 netif_stop_queue(netdev);
533 /* Free the skbs for XMITs that haven't been serviced by the server.
534 * We shouldn't have to inform Linux about these IOs because they
535 * are "lost in the ethernet"
537 skb_queue_purge(&devdata->xmitbufhead);
539 spin_lock_irqsave(&devdata->priv_lock, flags);
540 /* free rcv buffers */
541 for (i = 0; i < devdata->num_rcv_bufs; i++) {
542 if (devdata->rcvbuf[i]) {
543 kfree_skb(devdata->rcvbuf[i]);
544 devdata->rcvbuf[i] = NULL;
545 count++;
548 atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
549 spin_unlock_irqrestore(&devdata->priv_lock, flags);
551 if (devdata->server_down_complete_func)
552 (*devdata->server_down_complete_func)(devdata->dev, 0);
554 devdata->server_down = true;
555 devdata->server_change_state = false;
556 devdata->server_down_complete_func = NULL;
560 * visornic_serverdown - Command has notified us that IOPART is down
561 * @devdata: device that is being managed by IOPART
563 * Schedule the work needed to handle the server down request. Make
564 * sure we haven't already handled the server change state event.
565 * Returns 0 if we scheduled the work, -EINVAL on error.
567 static int
568 visornic_serverdown(struct visornic_devdata *devdata,
569 visorbus_state_complete_func complete_func)
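	/* Only schedule the serverdown work if we are not already down and
	 * not already processing a state change; a repeat notification while
	 * a change is in flight is reported as an error.
	 */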
571 if (!devdata->server_down && !devdata->server_change_state) {
572 devdata->server_change_state = true;
573 devdata->server_down_complete_func = complete_func;
574 queue_work(visornic_serverdown_workqueue,
575 &devdata->serverdown_completion);
576 } else if (devdata->server_change_state) {
577 dev_dbg(&devdata->dev->device, "%s changing state\n",
578 __func__);
579 return -EINVAL;
581 return 0;
585 * alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition.
586 * @netdev: network adapter the rcv bufs are attached to.
588 * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
589 * so that it can write rcv data into our memory space.
590 * Return pointer to sk_buff
592 static struct sk_buff *
593 alloc_rcv_buf(struct net_device *netdev)
595 struct sk_buff *skb;
597 /* NOTE: the first fragment in each rcv buffer is pointed to by
598 * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
599 * in length, so the firstfrag is large enough to hold 1514.
601 skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
602 if (!skb)
603 return NULL;
604 skb->dev = netdev;
605 skb->len = RCVPOST_BUF_SIZE;
606 /* current value of mtu doesn't come into play here; large
607 * packets will just end up using multiple rcv buffers all of
608 * same size
610 skb->data_len = 0; /* dev_alloc_skb already zeroes it out
611 * for clarification.
613 return skb;
617 * post_skb - post a skb to the IO Partition.
618 * @cmdrsp: cmdrsp packet to be sent to the IO Partition
619 * @devdata: visornic_devdata to post the skb to
620 * @skb: skb to give to the IO partition
622 * Send the skb to the IO Partition.
623 * Returns void
625 static inline void
626 post_skb(struct uiscmdrsp *cmdrsp,
627 struct visornic_devdata *devdata, struct sk_buff *skb)
629 cmdrsp->net.buf = skb;
630 cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
631 cmdrsp->net.rcvpost.frag.pi_off =
632 (unsigned long)skb->data & PI_PAGE_MASK;
633 cmdrsp->net.rcvpost.frag.pi_len = skb->len;
634 cmdrsp->net.rcvpost.unique_num = devdata->uniquenum;
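	/* Only post the buffer if it fits within a single page; the rcv post
	 * carries just one pfn/offset/length triple, so a buffer that would
	 * cross a page boundary is simply not posted here.
	 */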
636 if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) {
637 cmdrsp->net.type = NET_RCV_POST;
638 cmdrsp->cmdtype = CMD_NET_TYPE;
639 visorchannel_signalinsert(devdata->dev->visorchannel,
640 IOCHAN_TO_IOPART,
641 cmdrsp);
642 atomic_inc(&devdata->num_rcvbuf_in_iovm);
643 devdata->chstat.sent_post++;
648 * send_enbdis - send NET_RCV_ENBDIS to IO Partition
649 * @netdev: netdevice we are enabling/disabling, used as context
650 * return value
651 * @state: enable = 1/disable = 0
652 * @devdata: visornic device we are enabling/disabling
654 * Send the enable/disable message to the IO Partition.
655 * Returns void
657 static void
658 send_enbdis(struct net_device *netdev, int state,
659 struct visornic_devdata *devdata)
661 devdata->cmdrsp_rcv->net.enbdis.enable = state;
662 devdata->cmdrsp_rcv->net.enbdis.context = netdev;
663 devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
664 devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
665 visorchannel_signalinsert(devdata->dev->visorchannel,
666 IOCHAN_TO_IOPART,
667 devdata->cmdrsp_rcv);
668 devdata->chstat.sent_enbdis++;
672 * visornic_disable_with_timeout - Disable network adapter
673 * @netdev: netdevice to disable
674 * @timeout: timeout to wait for disable
676 * Disable the network adapter and inform the IO Partition that we
677 * are disabled, reclaim memory from rcv bufs.
678 * Returns 0 on success, negative for failure of IO Partition
679 * responding.
682 static int
683 visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
685 struct visornic_devdata *devdata = netdev_priv(netdev);
686 int i;
687 unsigned long flags;
688 int wait = 0;
690 /* stop the transmit queue so nothing more can be transmitted */
691 netif_stop_queue(netdev);
693 /* send a msg telling the other end we are stopping incoming pkts */
694 spin_lock_irqsave(&devdata->priv_lock, flags);
695 devdata->enabled = 0;
696 devdata->enab_dis_acked = 0; /* must wait for ack */
697 spin_unlock_irqrestore(&devdata->priv_lock, flags);
699 /* send disable and wait for ack -- don't hold lock when sending
700 * disable because if the queue is full, insert might sleep.
702 send_enbdis(netdev, 0, devdata);
704 /* wait for ack to arrive before we try to free rcv buffers
705 * NOTE: the other end automatically unposts the rcv buffers
706 * when it gets a disable.
708 spin_lock_irqsave(&devdata->priv_lock, flags);
709 while ((timeout == VISORNIC_INFINITE_RESPONSE_WAIT) ||
710 (wait < timeout)) {
711 if (devdata->enab_dis_acked)
712 break;
713 if (devdata->server_down || devdata->server_change_state) {
714 spin_unlock_irqrestore(&devdata->priv_lock, flags);
715 dev_dbg(&netdev->dev, "%s server went away\n",
716 __func__);
717 return -EIO;
719 set_current_state(TASK_INTERRUPTIBLE);
720 spin_unlock_irqrestore(&devdata->priv_lock, flags);
721 wait += schedule_timeout(msecs_to_jiffies(10));
722 spin_lock_irqsave(&devdata->priv_lock, flags);
725 /* Wait for usage to go to 1 (no other users) before freeing
726 * rcv buffers
728 if (atomic_read(&devdata->usage) > 1) {
729 while (1) {
730 set_current_state(TASK_INTERRUPTIBLE);
731 spin_unlock_irqrestore(&devdata->priv_lock, flags);
732 schedule_timeout(msecs_to_jiffies(10));
733 spin_lock_irqsave(&devdata->priv_lock, flags);
734 if (atomic_read(&devdata->usage))
735 break;
739 /* we've set enabled to 0, so we can give up the lock. */
740 spin_unlock_irqrestore(&devdata->priv_lock, flags);
742 /* Free rcv buffers - other end has automatically unposed them on
743 * disable
745 for (i = 0; i < devdata->num_rcv_bufs; i++) {
746 if (devdata->rcvbuf[i]) {
747 kfree_skb(devdata->rcvbuf[i]);
748 devdata->rcvbuf[i] = NULL;
752 /* remove references from array */
753 for (i = 0; i < VISORNICSOPENMAX; i++)
754 if (num_visornic_open[i] == netdev) {
755 num_visornic_open[i] = NULL;
756 break;
759 return 0;
763 * init_rcv_bufs -- initialize receive bufs and send them to the IO Part
764 * @netdev: struct netdevice
765 * @devdata: visornic_devdata
767 * Allocate rcv buffers and post them to the IO Partition.
768 * Return 0 for success, and negative for failure.
770 static int
771 init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
773 int i, count;
775 /* allocate fixed number of receive buffers to post to uisnic
776 * post receive buffers after we've allocated a required amount
778 for (i = 0; i < devdata->num_rcv_bufs; i++) {
779 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
780 if (!devdata->rcvbuf[i])
781 break; /* if we failed to allocate one let us stop */
783 if (i == 0) /* couldn't even allocate one -- bail out */
784 return -ENOMEM;
785 count = i;
787 /* Ensure we can alloc 2/3rds of the requested number of buffers.
788 * 2/3 is an arbitrary choice; used also in ndis init.c
790 if (count < ((2 * devdata->num_rcv_bufs) / 3)) {
791 /* free receive buffers we did alloc and then bail out */
792 for (i = 0; i < count; i++) {
793 kfree_skb(devdata->rcvbuf[i]);
794 devdata->rcvbuf[i] = NULL;
796 return -ENOMEM;
799 /* post receive buffers to receive incoming input - without holding
800 * lock - we've not enabled nor started the queue so there shouldn't
801 * be any rcv or xmit activity
803 for (i = 0; i < count; i++)
804 post_skb(devdata->cmdrsp_rcv, devdata, devdata->rcvbuf[i]);
806 return 0;
810 * visornic_enable_with_timeout - send enable to IO Part
811 * @netdev: struct net_device
812 * @timeout: Time to wait for the ACK from the enable
814 * Sends enable to IOVM, inits, and posts receive buffers to IOVM
815 * timeout is defined in msecs (timeout of 0 specifies infinite wait)
816 * Return 0 for success, negative for failure.
818 static int
819 visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
821 int i;
822 struct visornic_devdata *devdata = netdev_priv(netdev);
823 unsigned long flags;
824 int wait = 0;
826 /* NOTE: the other end automatically unposts the rcv buffers when it
827 * gets a disable.
829 i = init_rcv_bufs(netdev, devdata);
830 if (i < 0) {
831 dev_err(&netdev->dev,
832 "%s failed to init rcv bufs (%d)\n", __func__, i);
833 return i;
836 spin_lock_irqsave(&devdata->priv_lock, flags);
837 devdata->enabled = 1;
839 /* now we're ready, let's send an ENB to uisnic but until we get
840 * an ACK back from uisnic, we'll drop the packets
842 devdata->n_rcv_packets_not_accepted = 0;
843 spin_unlock_irqrestore(&devdata->priv_lock, flags);
845 /* send enable and wait for ack -- don't hold lock when sending enable
846 * because if the queue is full, insert might sleep.
848 send_enbdis(netdev, 1, devdata);
850 spin_lock_irqsave(&devdata->priv_lock, flags);
851 while ((timeout == VISORNIC_INFINITE_RESPONSE_WAIT) ||
852 (wait < timeout)) {
853 if (devdata->enab_dis_acked)
854 break;
855 if (devdata->server_down || devdata->server_change_state) {
856 spin_unlock_irqrestore(&devdata->priv_lock, flags);
857 dev_dbg(&netdev->dev, "%s server went away\n",
858 __func__);
859 return -EIO;
861 set_current_state(TASK_INTERRUPTIBLE);
862 spin_unlock_irqrestore(&devdata->priv_lock, flags);
863 wait += schedule_timeout(msecs_to_jiffies(10));
864 spin_lock_irqsave(&devdata->priv_lock, flags);
867 spin_unlock_irqrestore(&devdata->priv_lock, flags);
869 if (!devdata->enab_dis_acked) {
870 dev_err(&netdev->dev, "%s missing ACK\n", __func__);
871 return -EIO;
874 /* find an open slot in the array to save off VisorNic references
875 * for debug
877 for (i = 0; i < VISORNICSOPENMAX; i++) {
878 if (!num_visornic_open[i]) {
879 num_visornic_open[i] = netdev;
880 break;
884 return 0;
888 * visornic_timeout_reset - handle xmit timeout resets
889 * @work: work item that scheduled the work
891 * Transmit timeouts are typically handled by resetting the
892 * device; for our virtual NIC we send a Disable and Enable
893 * to the IOVM. If it doesn't respond we will trigger a serverdown.
895 static void
896 visornic_timeout_reset(struct work_struct *work)
898 struct visornic_devdata *devdata;
899 struct net_device *netdev;
900 int response = 0;
902 devdata = container_of(work, struct visornic_devdata, timeout_reset);
903 netdev = devdata->netdev;
905 netif_stop_queue(netdev);
906 response = visornic_disable_with_timeout(netdev, 100);
907 if (response)
908 goto call_serverdown;
910 response = visornic_enable_with_timeout(netdev, 100);
911 if (response)
912 goto call_serverdown;
913 netif_wake_queue(netdev);
915 return;
917 call_serverdown:
918 visornic_serverdown(devdata, NULL);
922 * visornic_open - Enable the visornic device and mark the queue started
923 * @netdev: netdevice to start
925 * Enable the device and start the transmit queue.
926 * Return 0 for success
928 static int
929 visornic_open(struct net_device *netdev)
931 visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RESPONSE_WAIT);
933 /* start the interface's transmit queue, allowing it to accept
934 * packets for transmission
936 netif_start_queue(netdev);
938 return 0;
942 * visornic_close - Disables the visornic device and stops the queues
943 * @netdev: netdevice to stop
945 * Disable the device and stop the transmit queue.
946 * Return 0 for success
948 static int
949 visornic_close(struct net_device *netdev)
951 netif_stop_queue(netdev);
952 visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RESPONSE_WAIT);
954 return 0;
958 * visornic_xmit - send a packet to the IO Partition
959 * @skb: Packet to be sent
960 * @netdev: net device the packet is being sent from
962 * Convert the skb to a cmdrsp so the IO Partition can understand it.
963 * Send the XMIT command to the IO Partition for processing. This
964 * function is protected from concurrent calls by a spinlock xmit_lock
965 * in the net_device struct, but as soon as the function returns it
966 * can be called again.
967 * Returns NETDEV_TX_OK for success, NETDEV_TX_BUSY for error.
969 static int
970 visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
972 struct visornic_devdata *devdata;
973 int len, firstfraglen, padlen;
974 struct uiscmdrsp *cmdrsp = NULL;
975 unsigned long flags;
977 devdata = netdev_priv(netdev);
978 spin_lock_irqsave(&devdata->priv_lock, flags);
980 if (netif_queue_stopped(netdev) || devdata->server_down ||
981 devdata->server_change_state) {
982 spin_unlock_irqrestore(&devdata->priv_lock, flags);
983 devdata->busy_cnt++;
984 dev_dbg(&netdev->dev,
985 "%s busy - queue stopped\n", __func__);
986 return NETDEV_TX_BUSY;
989 /* sk_buff struct is used to host network data throughout all the
990 * linux network subsystems
992 len = skb->len;
994 /* skb->len is the FULL length of data (including fragmentary portion)
995 * skb->data_len is the length of the fragment portion in frags
996 * skb->len - skb->data_len is size of the 1st fragment in skb->data
997 * calculate the length of the first fragment that skb->data is
998 * pointing to
1000 firstfraglen = skb->len - skb->data_len;
1001 if (firstfraglen < ETH_HEADER_SIZE) {
1002 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1003 devdata->busy_cnt++;
1004 dev_err(&netdev->dev,
1005 "%s busy - first frag too small (%d)\n",
1006 __func__, firstfraglen);
1007 return NETDEV_TX_BUSY;
1010 if ((len < ETH_MIN_PACKET_SIZE) &&
1011 ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
1012 /* pad the packet out to minimum size */
1013 padlen = ETH_MIN_PACKET_SIZE - len;
1014 memset(&skb->data[len], 0, padlen);
1015 skb->tail += padlen;
1016 skb->len += padlen;
1017 len += padlen;
1018 firstfraglen += padlen;
1021 cmdrsp = devdata->xmit_cmdrsp;
1022 /* clear cmdrsp */
1023 memset(cmdrsp, 0, SIZEOF_CMDRSP);
1024 cmdrsp->net.type = NET_XMIT;
1025 cmdrsp->cmdtype = CMD_NET_TYPE;
1027 /* save the pointer to skb -- we'll need it for completion */
1028 cmdrsp->net.buf = skb;
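	/* Throttle if too many xmits are outstanding: compare sent vs.
	 * completed counts, with the second clause handling the unsigned
	 * counters wrapping past ULONG_MAX.
	 */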
1030 if (((devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done) &&
1031 (devdata->chstat.sent_xmit - devdata->chstat.got_xmit_done >=
1032 devdata->max_outstanding_net_xmits)) ||
1033 ((devdata->chstat.sent_xmit < devdata->chstat.got_xmit_done) &&
1034 (ULONG_MAX - devdata->chstat.got_xmit_done +
1035 devdata->chstat.sent_xmit >=
1036 devdata->max_outstanding_net_xmits))) {
1037 /* too many NET_XMITs queued over to IOVM - need to wait
1039 devdata->chstat.reject_count++;
1040 if (!devdata->queuefullmsg_logged &&
1041 ((devdata->chstat.reject_count & 0x3ff) == 1))
1042 devdata->queuefullmsg_logged = 1;
1043 netif_stop_queue(netdev);
1044 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1045 devdata->busy_cnt++;
1046 dev_dbg(&netdev->dev,
1047 "%s busy - waiting for iovm to catch up\n",
1048 __func__);
1049 return NETDEV_TX_BUSY;
1051 if (devdata->queuefullmsg_logged)
1052 devdata->queuefullmsg_logged = 0;
1054 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1055 cmdrsp->net.xmt.lincsum.valid = 1;
1056 cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
1057 if (skb_transport_header(skb) > skb->data) {
1058 cmdrsp->net.xmt.lincsum.hrawoff =
1059 skb_transport_header(skb) - skb->data;
1060 cmdrsp->net.xmt.lincsum.hrawoffv = 1;
1062 if (skb_network_header(skb) > skb->data) {
1063 cmdrsp->net.xmt.lincsum.nhrawoff =
1064 skb_network_header(skb) - skb->data;
1065 cmdrsp->net.xmt.lincsum.nhrawoffv = 1;
1067 cmdrsp->net.xmt.lincsum.csum = skb->csum;
1068 } else {
1069 cmdrsp->net.xmt.lincsum.valid = 0;
1072 /* save off the length of the entire data packet */
1073 cmdrsp->net.xmt.len = len;
1075 /* copy ethernet header from first frag into ocmdrsp
1076 * - everything else will be pass in frags & DMA'ed
1078 memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HEADER_SIZE);
1079 /* copy frags info - from skb->data we need to only provide access
1080 * beyond eth header
1082 cmdrsp->net.xmt.num_frags =
1083 visor_copy_fragsinfo_from_skb(skb, firstfraglen,
1084 MAX_PHYS_INFO,
1085 cmdrsp->net.xmt.frags);
1086 if (cmdrsp->net.xmt.num_frags == -1) {
1087 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1088 devdata->busy_cnt++;
1089 dev_err(&netdev->dev,
1090 "%s busy - copy frags failed\n", __func__);
1091 return NETDEV_TX_BUSY;
1094 if (!visorchannel_signalinsert(devdata->dev->visorchannel,
1095 IOCHAN_TO_IOPART, cmdrsp)) {
1096 netif_stop_queue(netdev);
1097 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1098 devdata->busy_cnt++;
1099 dev_dbg(&netdev->dev,
1100 "%s busy - signalinsert failed\n", __func__);
1101 return NETDEV_TX_BUSY;
1104 /* Track the skbs that have been sent to the IOVM for XMIT */
1105 skb_queue_head(&devdata->xmitbufhead, skb);
1107 /* set the last transmission start time
1108 * linux doc says: Do not forget to update netdev->trans_start to
1109 * jiffies after each new tx packet is given to the hardware.
1111 netdev->trans_start = jiffies;
1113 /* update xmt stats */
1114 devdata->net_stats.tx_packets++;
1115 devdata->net_stats.tx_bytes += skb->len;
1116 devdata->chstat.sent_xmit++;
1118 /* check to see if we have hit the high watermark for
1119 * netif_stop_queue()
1121 if (((devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done) &&
1122 (devdata->chstat.sent_xmit - devdata->chstat.got_xmit_done >=
1123 devdata->upper_threshold_net_xmits)) ||
1124 ((devdata->chstat.sent_xmit < devdata->chstat.got_xmit_done) &&
1125 (ULONG_MAX - devdata->chstat.got_xmit_done +
1126 devdata->chstat.sent_xmit >=
1127 devdata->upper_threshold_net_xmits))) {
1128 /* too many NET_XMITs queued over to IOVM - need to wait */
1129 netif_stop_queue(netdev); /* calling stop queue - call
1130 * netif_wake_queue() after lower
1131 * threshold
1133 dev_dbg(&netdev->dev,
1134 "%s busy - invoking iovm flow control\n",
1135 __func__);
1136 devdata->flow_control_upper_hits++;
1138 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1140 /* skb will be freed when we get back NET_XMIT_DONE */
1141 return NETDEV_TX_OK;
1145 * visornic_get_stats - returns net_stats of the visornic device
1146 * @netdev: netdevice
1148 * Returns the net_device_stats for the device
1150 static struct net_device_stats *
1151 visornic_get_stats(struct net_device *netdev)
1153 struct visornic_devdata *devdata = netdev_priv(netdev);
1155 return &devdata->net_stats;
1159 * visornic_ioctl - ioctl function for netdevice.
1160 * @netdev: netdevice
1161 * @ifr: ignored
1162 * @cmd: ignored
1164 * Currently not supported.
1165 * Returns EOPNOTSUPP
1167 static int
1168 visornic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1170 return -EOPNOTSUPP;
1174 * visornic_change_mtu - changes mtu of device.
1175 * @netdev: netdevice
1176 * @new_mtu: value of new mtu
1178 * MTU cannot be changed by system, must be changed via
1179 * CONTROLVM message. All vnics and pnics in a switch have
1180 * to have the same MTU for everything to work.
1181 * Currently not supported.
1182 * Returns EINVAL
1184 static int
1185 visornic_change_mtu(struct net_device *netdev, int new_mtu)
1187 return -EINVAL;
1191 * visornic_set_multi - set the receive (multicast/promiscuous) mode of device.
1192 * @netdev: netdevice
1194 * Only flag we support currently is IFF_PROMISC
1195 * Returns void
1197 static void
1198 visornic_set_multi(struct net_device *netdev)
1200 struct uiscmdrsp *cmdrsp;
1201 struct visornic_devdata *devdata = netdev_priv(netdev);
1203 /* any filtering changes */
1204 if (devdata->old_flags != netdev->flags) {
1205 if ((netdev->flags & IFF_PROMISC) !=
1206 (devdata->old_flags & IFF_PROMISC)) {
1207 cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
1208 if (!cmdrsp)
1209 return;
1210 cmdrsp->cmdtype = CMD_NET_TYPE;
1211 cmdrsp->net.type = NET_RCV_PROMISC;
1212 cmdrsp->net.enbdis.context = netdev;
1213 cmdrsp->net.enbdis.enable =
1214 (netdev->flags & IFF_PROMISC);
1215 visorchannel_signalinsert(devdata->dev->visorchannel,
1216 IOCHAN_TO_IOPART,
1217 cmdrsp);
1218 kfree(cmdrsp);
1220 devdata->old_flags = netdev->flags;
1225 * visornic_xmit_timeout - request to timeout the xmit
1226 * @netdev: netdevice
1228 * Queue the work and return. Make sure we have not already
1229 * been informed that the IO Partition is gone; if it is gone
1230 * we will already timeout the xmits.
1232 static void
1233 visornic_xmit_timeout(struct net_device *netdev)
1235 struct visornic_devdata *devdata = netdev_priv(netdev);
1236 unsigned long flags;
1238 spin_lock_irqsave(&devdata->priv_lock, flags);
1239 /* Ensure that a ServerDown message hasn't been received */
1240 if (!devdata->enabled ||
1241 (devdata->server_down && !devdata->server_change_state)) {
1242 dev_dbg(&netdev->dev, "%s no processing\n",
1243 __func__);
1244 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1245 return;
1247 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1249 queue_work(visornic_timeout_reset_workqueue, &devdata->timeout_reset);
1253 * repost_return - repost rcv bufs that have come back
1254 * @cmdrsp: io channel command struct to post
1255 * @devdata: visornic devdata for the device
1256 * @skb: skb
1257 * @netdev: netdevice
1259 * Repost rcv buffers that have been returned to us when
1260 * we are finished with them.
1261 * Returns 0 for success, negative for error.
1263 static inline int
1264 repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
1265 struct sk_buff *skb, struct net_device *netdev)
1267 struct net_pkt_rcv copy;
1268 int i = 0, cc, numreposted;
1269 int found_skb = 0;
1270 int status = 0;
1272 copy = cmdrsp->net.rcv;
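	/* Work from a local copy of the rcv info, since posting replacement
	 * buffers below reuses this same cmdrsp.
	 */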
1273 switch (copy.numrcvbufs) {
1274 case 0:
1275 devdata->n_rcv0++;
1276 break;
1277 case 1:
1278 devdata->n_rcv1++;
1279 break;
1280 case 2:
1281 devdata->n_rcv2++;
1282 break;
1283 default:
1284 devdata->n_rcvx++;
1285 break;
1287 for (cc = 0, numreposted = 0; cc < copy.numrcvbufs; cc++) {
1288 for (i = 0; i < devdata->num_rcv_bufs; i++) {
1289 if (devdata->rcvbuf[i] != copy.rcvbuf[cc])
1290 continue;
1292 if ((skb) && devdata->rcvbuf[i] == skb) {
1293 devdata->found_repost_rcvbuf_cnt++;
1294 found_skb = 1;
1295 devdata->repost_found_skb_cnt++;
1297 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
1298 if (!devdata->rcvbuf[i]) {
1299 devdata->num_rcv_bufs_could_not_alloc++;
1300 devdata->alloc_failed_in_repost_rtn_cnt++;
1301 status = -ENOMEM;
1302 break;
1304 post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
1305 numreposted++;
1306 break;
1309 if (numreposted != copy.numrcvbufs) {
1310 devdata->n_repost_deficit++;
1311 status = -EINVAL;
1313 if (skb) {
1314 if (found_skb) {
1315 kfree_skb(skb);
1316 } else {
1317 status = -EINVAL;
1318 devdata->bad_rcv_buf++;
1321 atomic_dec(&devdata->usage);
1322 return status;
1326 * visornic_rx - Handle receive packets coming back from IO Part
1327 * @cmdrsp: Receive packet returned from IO Part
1329 * Got a receive packet back from the IO Part, handle it and send
1330 * it up the stack.
1331 * Returns void
1333 static void
1334 visornic_rx(struct uiscmdrsp *cmdrsp)
1336 struct visornic_devdata *devdata;
1337 struct sk_buff *skb, *prev, *curr;
1338 struct net_device *netdev;
1339 int cc, currsize, off, status;
1340 struct ethhdr *eth;
1341 unsigned long flags;
1342 #ifdef DEBUG
1343 struct phys_info testfrags[MAX_PHYS_INFO];
1344 #endif
1346 /* post new rcv buf to the other end using the cmdrsp we have at hand
1347 * post it without holding lock - but we'll use the signal lock to
1348 * synchronize the queue insert the cmdrsp that contains the net.rcv
1349 * is the one we are using to repost, so copy the info we need from it.
1351 skb = cmdrsp->net.buf;
1352 netdev = skb->dev;
1354 if (!netdev) {
1355 /* We must have previously downed this network device and
1356 * this skb and device is no longer valid. This also means
1357 * the skb reference was removed from devdata->rcvbuf so no
1358 * need to search for it.
1359 * All we can do is free the skb and return.
1360 * Note: We crash if we try to log this here.
1362 kfree_skb(skb);
1363 return;
1366 devdata = netdev_priv(netdev);
1368 spin_lock_irqsave(&devdata->priv_lock, flags);
1369 atomic_dec(&devdata->num_rcvbuf_in_iovm);
1371 /* update rcv stats - call it with priv_lock held */
1372 devdata->net_stats.rx_packets++;
1373 devdata->net_stats.rx_bytes += skb->len;
1375 atomic_inc(&devdata->usage); /* don't want a close to happen before
1376 * we're done here
1379 /* set length to how much was ACTUALLY received -
1380 * NOTE: rcv_done_len includes actual length of data rcvd
1381 * including ethhdr
1383 skb->len = cmdrsp->net.rcv.rcv_done_len;
1385 /* test enabled while holding lock */
1386 if (!(devdata->enabled && devdata->enab_dis_acked)) {
1387 /* don't process it unless we're in enable mode and until
1388 * we've gotten an ACK saying the other end got our RCV enable
1390 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1391 repost_return(cmdrsp, devdata, skb, netdev);
1392 return;
1395 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1397 /* when skb was allocated, skb->dev, skb->data, skb->len and
1398 * skb->data_len were setup. AND, data has already put into the
1399 * skb (both first frag and in frags pages)
1400 * NOTE: firstfragslen is the amount of data in skb->data and that
1401 * which is not in nr_frags or frag_list. This is now simply
1402 * RCVPOST_BUF_SIZE. bump tail to show how much data is in
1403 * firstfrag & set data_len to show rest see if we have to chain
1404 * frag_list.
1406 if (skb->len > RCVPOST_BUF_SIZE) { /* do PRECAUTIONARY check */
1407 if (cmdrsp->net.rcv.numrcvbufs < 2) {
1408 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1409 dev_err(&devdata->netdev->dev,
1410 "repost_return failed");
1411 return;
1413 /* length rcvd is greater than firstfrag in this skb rcv buf */
1414 skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */
1415 skb->data_len = skb->len - RCVPOST_BUF_SIZE; /* amount that
1416 will be in
1417 frag_list */
1418 } else {
1419 /* data fits in this skb - no chaining - do
1420 * PRECAUTIONARY check
1422 if (cmdrsp->net.rcv.numrcvbufs != 1) { /* should be 1 */
1423 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1424 dev_err(&devdata->netdev->dev,
1425 "repost_return failed");
1426 return;
1428 skb->tail += skb->len;
1429 skb->data_len = 0; /* nothing rcvd in frag_list */
1431 off = skb_tail_pointer(skb) - skb->data;
1433 /* amount we bumped tail by in the head skb
1434 * it is used to calculate the size of each chained skb below
1435 * it is also used to index into bufline to continue the copy
1436 * (for chansocktwopc)
1437 * if necessary chain the rcv skbs together.
1438 * NOTE: index 0 has the same as cmdrsp->net.rcv.skb; we need to
1439 * chain the rest to that one.
1440 * - do PRECAUTIONARY check
1442 if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
1443 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1444 dev_err(&devdata->netdev->dev, "repost_return failed");
1445 return;
1448 if (cmdrsp->net.rcv.numrcvbufs > 1) {
1449 /* chain the various rcv buffers into the skb's frag_list. */
1450 /* Note: off was initialized above */
1451 for (cc = 1, prev = NULL;
1452 cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
1453 curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
1454 curr->next = NULL;
1455 if (!prev) /* start of list- set head */
1456 skb_shinfo(skb)->frag_list = curr;
1457 else
1458 prev->next = curr;
1459 prev = curr;
1461 /* should we set skb->len and skb->data_len for each
1462 * buffer being chained??? can't hurt!
1464 currsize = min(skb->len - off,
1465 (unsigned int)RCVPOST_BUF_SIZE);
1466 curr->len = currsize;
1467 curr->tail += currsize;
1468 curr->data_len = 0;
1469 off += currsize;
1471 #ifdef DEBUG
1472 /* assert skb->len == off */
1473 if (skb->len != off) {
1474 dev_err(&devdata->netdev->dev,
1475 "%s something wrong; skb->len:%d != off:%d\n",
1476 netdev->name, skb->len, off);
1478 /* test code */
1479 cc = util_copy_fragsinfo_from_skb("rcvchaintest", skb,
1480 RCVPOST_BUF_SIZE,
1481 MAX_PHYS_INFO, testfrags);
1482 if (cc != cmdrsp->net.rcv.numrcvbufs) {
1483 dev_err(&devdata->netdev->dev,
1484 "**** %s Something wrong; rcvd chain length %d different from one we calculated %d\n",
1485 netdev->name, cmdrsp->net.rcv.numrcvbufs, cc);
1487 for (i = 0; i < cc; i++) {
1488 dev_info(&devdata->netdev->dev,
1489 "test:RCVPOST_BUF_SIZE:%d[%d] pfn:%llu off:0x%x len:%d\n",
1490 RCVPOST_BUF_SIZE, i, testfrags[i].pi_pfn,
1491 testfrags[i].pi_off, testfrags[i].pi_len);
1493 #endif
1496 /* set up packet's protocol type using ethernet header - this
1497 * sets up skb->pkt_type & it also PULLS out the eth header
1499 skb->protocol = eth_type_trans(skb, netdev);
1501 eth = eth_hdr(skb);
1503 skb->csum = 0;
1504 skb->ip_summed = CHECKSUM_NONE;
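	/* Apply receive-mode filtering in software: the do/while below breaks
	 * out to accept the packet (promiscuous, broadcast, matching
	 * multicast, or host-addressed) and otherwise falls through to drop
	 * it.
	 */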
1506 do {
1507 if (netdev->flags & IFF_PROMISC)
1508 break; /* accept all packets */
1509 if (skb->pkt_type == PACKET_BROADCAST) {
1510 if (netdev->flags & IFF_BROADCAST)
1511 break; /* accept all broadcast packets */
1512 } else if (skb->pkt_type == PACKET_MULTICAST) {
1513 if ((netdev->flags & IFF_MULTICAST) &&
1514 (netdev_mc_count(netdev))) {
1515 struct netdev_hw_addr *ha;
1516 int found_mc = 0;
1518 /* only accept multicast packets that we can
1519 * find in our multicast address list
1521 netdev_for_each_mc_addr(ha, netdev) {
1522 if (ether_addr_equal(eth->h_dest,
1523 ha->addr)) {
1524 found_mc = 1;
1525 break;
1528 if (found_mc)
1529 break; /* accept packet, dest
1530 matches a multicast
1531 address */
1533 } else if (skb->pkt_type == PACKET_HOST) {
1534 break; /* accept packet, h_dest must match vnic
1535 mac address */
1536 } else if (skb->pkt_type == PACKET_OTHERHOST) {
1537 /* something is not right */
1538 dev_err(&devdata->netdev->dev,
1539 "**** FAILED to deliver rcv packet to OS; name:%s Dest:%pM VNIC:%pM\n",
1540 netdev->name, eth->h_dest, netdev->dev_addr);
1542 /* drop packet - don't forward it up to OS */
1543 devdata->n_rcv_packets_not_accepted++;
1544 repost_return(cmdrsp, devdata, skb, netdev);
1545 return;
1546 } while (0);
1548 status = netif_rx(skb);
1549 /* netif_rx returns various values, but "in practice most drivers
1550 * ignore the return value
1553 skb = NULL;
1555 * whether the packet got dropped or handled, the skb is freed by
1556 * kernel code, so we shouldn't free it. but we should repost a
1557 * new rcv buffer.
1559 repost_return(cmdrsp, devdata, skb, netdev);
1563 * devdata_initialize - Initialize devdata structure
1564 * @devdata: visornic_devdata structure to initialize
1565 * @dev: visor_device it belongs to
1567 * Setup initial values for the visornic based on channel and default
1568 * values.
1569 * Returns a pointer to the devdata if successful, else NULL
1571 static struct visornic_devdata *
1572 devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
1574 int devnum = -1;
1576 if (!devdata)
1577 return NULL;
1578 memset(devdata, '\0', sizeof(struct visornic_devdata));
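	/* Allocate a device number from the shared bitmap; dev_num_pool_lock
	 * serializes probes that might otherwise race for the same bit.
	 */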
1579 spin_lock(&dev_num_pool_lock);
1580 devnum = find_first_zero_bit(dev_num_pool, MAXDEVICES);
1581 set_bit(devnum, dev_num_pool);
1582 spin_unlock(&dev_num_pool_lock);
1583 if (devnum == MAXDEVICES)
1584 devnum = -1;
1585 if (devnum < 0) {
1586 kfree(devdata);
1587 return NULL;
1589 devdata->devnum = devnum;
1590 devdata->dev = dev;
1591 strncpy(devdata->name, dev_name(&dev->device), sizeof(devdata->name));
1592 kref_init(&devdata->kref);
1593 spin_lock(&lock_all_devices);
1594 list_add_tail(&devdata->list_all, &list_all_devices);
1595 spin_unlock(&lock_all_devices);
1596 return devdata;
1600 * devdata_release - Frees up a devdata
1601 * @mykref: kref to the devdata
1603 * Frees up a devdata.
1604 * Returns void
1606 static void devdata_release(struct kref *mykref)
1608 struct visornic_devdata *devdata =
1609 container_of(mykref, struct visornic_devdata, kref);
1611 spin_lock(&dev_num_pool_lock);
1612 clear_bit(devdata->devnum, dev_num_pool);
1613 spin_unlock(&dev_num_pool_lock);
1614 spin_lock(&lock_all_devices);
1615 list_del(&devdata->list_all);
1616 spin_unlock(&lock_all_devices);
1617 kfree(devdata);
1620 static const struct net_device_ops visornic_dev_ops = {
1621 .ndo_open = visornic_open,
1622 .ndo_stop = visornic_close,
1623 .ndo_start_xmit = visornic_xmit,
1624 .ndo_get_stats = visornic_get_stats,
1625 .ndo_do_ioctl = visornic_ioctl,
1626 .ndo_change_mtu = visornic_change_mtu,
1627 .ndo_tx_timeout = visornic_xmit_timeout,
1628 .ndo_set_rx_mode = visornic_set_multi,
1632 * send_rcv_posts_if_needed
1633 * @devdata: visornic device
1635 * Send receive buffers to the IO Partition.
1636 * Returns void
1638 static void
1639 send_rcv_posts_if_needed(struct visornic_devdata *devdata)
1641 int i;
1642 struct net_device *netdev;
1643 struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
1644 int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
1646 /* don't do this until vnic is marked ready */
1647 if (!(devdata->enabled && devdata->enab_dis_acked))
1648 return;
1650 netdev = devdata->netdev;
1651 rcv_bufs_allocated = 0;
1652 /* this code is trying to prevent getting stuck here forever,
1653 * but still retry it if you can't allocate them all this time.
1655 cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
1656 while (cur_num_rcv_bufs_to_alloc > 0) {
1657 cur_num_rcv_bufs_to_alloc--;
1658 for (i = 0; i < devdata->num_rcv_bufs; i++) {
1659 if (devdata->rcvbuf[i])
1660 continue;
1661 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
1662 if (!devdata->rcvbuf[i]) {
1663 devdata->alloc_failed_in_if_needed_cnt++;
1664 break;
1666 rcv_bufs_allocated++;
1667 post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
1668 devdata->chstat.extra_rcvbufs_sent++;
1671 devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
1675 * drain_queue - drains the response queue
1676 * @cmdrsp: io channel command response message
1677 * @devdata: visornic device to drain
1679 * Drain the response queue of any responses from the IO partition.
1680 * Process the responses as we get them.
1681 * Returns when response queue is empty or when the thread stops.
1683 static void
1684 drain_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
1686 unsigned long flags;
1687 struct net_device *netdev;
1689 /* drain queue */
1690 while (1) {
1691 /* TODO: CLIENT ACQUIRE -- Don't really need this at the
1692 * moment */
1693 if (!visorchannel_signalremove(devdata->dev->visorchannel,
1694 IOCHAN_FROM_IOPART,
1695 cmdrsp))
1696 break; /* queue empty */
1698 switch (cmdrsp->net.type) {
1699 case NET_RCV:
1700 devdata->chstat.got_rcv++;
1701 /* process incoming packet */
1702 visornic_rx(cmdrsp);
1703 break;
1704 case NET_XMIT_DONE:
1705 spin_lock_irqsave(&devdata->priv_lock, flags);
1706 devdata->chstat.got_xmit_done++;
1707 if (cmdrsp->net.xmtdone.xmt_done_result)
1708 devdata->chstat.xmit_fail++;
1709 /* only call queue wake if we stopped it */
1710 netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
1711 /* ASSERT netdev == vnicinfo->netdev; */
1712 if ((netdev == devdata->netdev) &&
1713 netif_queue_stopped(netdev)) {
1714 /* check to see if we have crossed
1715 * the lower watermark for
1716 * netif_wake_queue()
1718 if (((devdata->chstat.sent_xmit >=
1719 devdata->chstat.got_xmit_done) &&
1720 (devdata->chstat.sent_xmit -
1721 devdata->chstat.got_xmit_done <=
1722 devdata->lower_threshold_net_xmits)) ||
1723 ((devdata->chstat.sent_xmit <
1724 devdata->chstat.got_xmit_done) &&
1725 (ULONG_MAX - devdata->chstat.got_xmit_done
1726 + devdata->chstat.sent_xmit <=
1727 devdata->lower_threshold_net_xmits))) {
1728 /* enough NET_XMITs completed
1729 * so can restart netif queue
1731 netif_wake_queue(netdev);
1732 devdata->flow_control_lower_hits++;
1735 skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
1736 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1737 kfree_skb(cmdrsp->net.buf);
1738 break;
1739 case NET_RCV_ENBDIS_ACK:
1740 devdata->chstat.got_enbdisack++;
1741 netdev = (struct net_device *)
1742 cmdrsp->net.enbdis.context;
1743 spin_lock_irqsave(&devdata->priv_lock, flags);
1744 devdata->enab_dis_acked = 1;
1745 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1747 if (devdata->server_down &&
1748 devdata->server_change_state) {
1749 /* Inform Linux that the link is up */
1750 devdata->server_down = false;
1751 devdata->server_change_state = false;
1752 netif_wake_queue(netdev);
1753 netif_carrier_on(netdev);
1755 break;
1756 case NET_CONNECT_STATUS:
1757 netdev = devdata->netdev;
1758 if (cmdrsp->net.enbdis.enable == 1) {
1759 spin_lock_irqsave(&devdata->priv_lock, flags);
1760 devdata->enabled = cmdrsp->net.enbdis.enable;
1761 spin_unlock_irqrestore(&devdata->priv_lock,
1762 flags);
1763 netif_wake_queue(netdev);
1764 netif_carrier_on(netdev);
1765 } else {
1766 netif_stop_queue(netdev);
1767 netif_carrier_off(netdev);
1768 spin_lock_irqsave(&devdata->priv_lock, flags);
1769 devdata->enabled = cmdrsp->net.enbdis.enable;
1770 spin_unlock_irqrestore(&devdata->priv_lock,
1771 flags);
1773 break;
1774 default:
1775 break;
1777 /* cmdrsp is now available for reuse */
1779 if (kthread_should_stop())
1780 break;
1785 * process_incoming_rsps - Checks the status of the response queue.
1786 * @v: void pointer to the visornic devdata
1788 * Main function of the vnic_incoming thread. Periodically check the
1789 * response queue and drain it if needed.
1790 * Returns when thread has stopped.
1792 static int
1793 process_incoming_rsps(void *v)
1795 struct visornic_devdata *devdata = v;
1796 struct uiscmdrsp *cmdrsp = NULL;
1797 const int SZ = SIZEOF_CMDRSP;
1799 cmdrsp = kmalloc(SZ, GFP_ATOMIC);
1800 if (!cmdrsp)
1801 complete_and_exit(&devdata->threadinfo.has_stopped, 0);
1803 while (1) {
1804 wait_event_interruptible_timeout(
1805 devdata->rsp_queue, (atomic_read(
1806 &devdata->interrupt_rcvd) == 1),
1807 msecs_to_jiffies(devdata->thread_wait_ms));
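		/* Wake on a channel interrupt (interrupt_rcvd set) or on the
		 * poll timeout; since the channel is put in polling mode at
		 * probe time, the timeout is the normal wakeup path.
		 */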
1809 /* periodically check to see if there are any rcv bufs which
1810 * need to get sent to the IOSP. This can only happen if
1811 * we run out of memory when trying to allocate skbs.
1813 atomic_set(&devdata->interrupt_rcvd, 0);
1814 send_rcv_posts_if_needed(devdata);
1815 drain_queue(cmdrsp, devdata);
1816 if (kthread_should_stop())
1817 break;
1820 kfree(cmdrsp);
1821 complete_and_exit(&devdata->threadinfo.has_stopped, 0);
1825 * visornic_probe - probe function for visornic devices
1826 * @dev: The visor device discovered
1828 * Called when visorbus discovers a visornic device on its
1829 * bus. It creates a new visornic ethernet adapter.
1830 * Returns 0 or negative for error.
static int visornic_probe(struct visor_device *dev)
{
	struct visornic_devdata *devdata = NULL;
	struct net_device *netdev = NULL;
	int err;
	int channel_offset = 0;
	u64 features;

	netdev = alloc_etherdev(sizeof(struct visornic_devdata));
	if (!netdev) {
		dev_err(&dev->device,
			"%s alloc_etherdev failed\n", __func__);
		return -ENOMEM;
	}

	netdev->netdev_ops = &visornic_dev_ops;
	netdev->watchdog_timeo = (5 * HZ);
	netdev->dev.parent = &dev->device;

	/* Get MAC address from channel and read it into the device. */
	netdev->addr_len = ETH_ALEN;
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vnic.macaddr);
	err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
				    ETH_ALEN);
	if (err < 0) {
		dev_err(&dev->device,
			"%s failed to get mac addr from chan (%d)\n",
			__func__, err);
		goto cleanup_netdev;
	}

	devdata = devdata_initialize(netdev_priv(netdev), dev);
	if (!devdata) {
		dev_err(&dev->device,
			"%s devdata_initialize failed\n", __func__);
		err = -ENOMEM;
		goto cleanup_netdev;
	}

	devdata->netdev = netdev;
	dev_set_drvdata(&dev->device, devdata);
	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->priv_lock);
	devdata->enabled = 0; /* not yet */
	atomic_set(&devdata->usage, 1);

	/* Setup rcv bufs */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vnic.num_rcv_bufs);
	err = visorbus_read_channel(dev, channel_offset,
				    &devdata->num_rcv_bufs, 4);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get #rcv bufs from chan (%d)\n",
			__func__, err);
		goto cleanup_netdev;
	}

	devdata->rcvbuf = kzalloc(sizeof(struct sk_buff *) *
				  devdata->num_rcv_bufs, GFP_KERNEL);
	if (!devdata->rcvbuf) {
		err = -ENOMEM;
		goto cleanup_rcvbuf;
	}

	/* set the net_xmit outstanding threshold */
	/* always leave two slots open but you should have 3 at a minimum */
	devdata->max_outstanding_net_xmits =
		max(3, ((devdata->num_rcv_bufs / 3) - 2));
	devdata->upper_threshold_net_xmits =
		max(2, devdata->max_outstanding_net_xmits - 1);
	devdata->lower_threshold_net_xmits =
		max(1, devdata->max_outstanding_net_xmits / 2);
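	/* Worked example (illustrative only): with 64 receive buffers this
	 * gives max_outstanding_net_xmits = max(3, 64/3 - 2) = 19,
	 * upper_threshold_net_xmits = 18 and lower_threshold_net_xmits = 9;
	 * the transmit path can stop the queue as the outstanding count
	 * approaches the upper figures, and the response handling in
	 * drain_queue() wakes it once the count drains to the lower one.
	 */
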
	skb_queue_head_init(&devdata->xmitbufhead);

	/* create a cmdrsp we can use to post and unpost rcv buffers */
	devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
	if (!devdata->cmdrsp_rcv) {
		err = -ENOMEM;
		goto cleanup_cmdrsp_rcv;
	}
	devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
	if (!devdata->xmit_cmdrsp) {
		err = -ENOMEM;
		goto cleanup_xmit_cmdrsp;
	}
	INIT_WORK(&devdata->serverdown_completion,
		  visornic_serverdown_complete);
	INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
	devdata->server_down = false;
	devdata->server_change_state = false;

	/* set the default mtu */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vnic.mtu);
	err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get mtu from chan (%d)\n",
			__func__, err);
		goto cleanup_xmit_cmdrsp;
	}

	/* TODO: Setup Interrupt information */
	/* Let's start our threads to get responses */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get features from chan (%d)\n",
			__func__, err);
		goto cleanup_xmit_cmdrsp;
	}

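	/* Interrupts are not wired up yet (see the TODO above), so run the
	 * channel in polling mode: the vnic_incoming thread started below
	 * polls for responses rather than waiting for a channel interrupt.
	 */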
	features |= ULTRA_IO_CHANNEL_IS_POLLING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err) {
		dev_err(&dev->device,
			"%s failed to set features in chan (%d)\n",
			__func__, err);
		goto cleanup_xmit_cmdrsp;
	}

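	/* Poll interval for the response thread, in milliseconds. */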
	devdata->thread_wait_ms = 2;
	visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
			   devdata, "vnic_incoming");

	err = register_netdev(netdev);
	if (err) {
		dev_err(&dev->device,
			"%s register_netdev failed (%d)\n", __func__, err);
		goto cleanup_thread_stop;
	}

	/* create debug/sysfs directories */
	devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
						      visornic_debugfs_dir);
	if (!devdata->eth_debugfs_dir) {
		dev_err(&dev->device,
			"%s debugfs_create_dir %s failed\n",
			__func__, netdev->name);
		err = -ENOMEM;
		goto cleanup_thread_stop;
	}

	dev_info(&dev->device, "%s success netdev=%s\n",
		 __func__, netdev->name);
	return 0;

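/* Error unwind: each label below undoes only what was set up before the
 * corresponding goto, in reverse order of allocation, falling through to
 * free_netdev() at the end.
 */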
cleanup_thread_stop:
	visor_thread_stop(&devdata->threadinfo);

cleanup_xmit_cmdrsp:
	kfree(devdata->xmit_cmdrsp);

cleanup_cmdrsp_rcv:
	kfree(devdata->cmdrsp_rcv);

cleanup_rcvbuf:
	kfree(devdata->rcvbuf);

cleanup_netdev:
	free_netdev(netdev);
	return err;
}

/**
 * host_side_disappeared - IO part is gone.
 * @devdata: device object
 *
 * IO partition servicing this device is gone, do cleanup.
 * Returns void.
 */
static void host_side_disappeared(struct visornic_devdata *devdata)
{
	unsigned long flags;

	spin_lock_irqsave(&devdata->priv_lock, flags);
	sprintf(devdata->name, "<dev#%d-history>", devdata->devnum);
	devdata->dev = NULL; /* indicate device destroyed */
	spin_unlock_irqrestore(&devdata->priv_lock, flags);
}

/**
 * visornic_remove - Called when visornic dev goes away
 * @dev: visornic device that is being removed
 *
 * Called when DEVICE_DESTROY gets called to remove device.
 * Returns void.
 */
static void visornic_remove(struct visor_device *dev)
{
	struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);

	if (!devdata) {
		dev_err(&dev->device, "%s no devdata\n", __func__);
		return;
	}
	dev_set_drvdata(&dev->device, NULL);
	host_side_disappeared(devdata);
	kref_put(&devdata->kref, devdata_release);
}

/**
 * visornic_pause - Called when IO Part disappears
 * @dev: visornic device that is being serviced
 * @complete_func: call when finished
 *
 * Called when the IO Partition has gone down. Need to free
 * up resources and wait for the IO partition to come back. Mark
 * the link as down and don't attempt any DMA. When we have freed
 * memory, call complete_func so that Command knows we are
 * done. If we don't call complete_func, the IO partition will never
 * come back.
 * Returns 0 for success.
 */
static int visornic_pause(struct visor_device *dev,
			  visorbus_state_complete_func complete_func)
{
	struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);

	visornic_serverdown(devdata, complete_func);
	return 0;
}

/**
 * visornic_resume - Called when IO part has recovered
 * @dev: visornic device that is being serviced
 * @complete_func: call when finished
 *
 * Called when the IO partition has recovered. Reestablish
 * connection to the IO part and set the link up. Okay to do
 * DMA again.
 * Returns 0 for success.
 */
static int visornic_resume(struct visor_device *dev,
			   visorbus_state_complete_func complete_func)
{
	struct visornic_devdata *devdata;
	struct net_device *netdev;
	unsigned long flags;

	devdata = dev_get_drvdata(&dev->device);
	if (!devdata) {
		dev_err(&dev->device, "%s no devdata\n", __func__);
		return -EINVAL;
	}

	netdev = devdata->netdev;

	if (devdata->server_down && !devdata->server_change_state) {
		devdata->server_change_state = true;
		/* Must transition channel to ATTACHED state BEFORE
		 * we can start using the device again.
		 * TODO: State transitions
		 */
		visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
				   devdata, "vnic_incoming");
		init_rcv_bufs(netdev, devdata);
		spin_lock_irqsave(&devdata->priv_lock, flags);
		devdata->enabled = 1;

		/* Now we're ready, let's send an ENB to uisnic but until
		 * we get an ACK back from uisnic, we'll drop the packets
		 */
		devdata->enab_dis_acked = 0;
		spin_unlock_irqrestore(&devdata->priv_lock, flags);

		/* send enable and wait for ack - don't hold lock when
		 * sending enable because if the queue is full, insert
		 * might sleep.
		 */
		send_enbdis(netdev, 1, devdata);
	} else if (devdata->server_change_state) {
		dev_err(&dev->device, "%s server_change_state\n",
			__func__);
		return -EIO;
	}

	complete_func(dev, 0);
	return 0;
}

/**
 * visornic_init - Init function
 *
 * Init function for the visornic driver. Do initial driver setup
 * and wait for devices.
 * Returns 0 for success, negative for error.
 */
static int visornic_init(void)
{
	struct dentry *ret;
	int err = -ENOMEM;

	visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
	if (!visornic_debugfs_dir)
		return err;

	ret = debugfs_create_file("info", S_IRUSR, visornic_debugfs_dir, NULL,
				  &debugfs_info_fops);
	if (!ret)
		goto cleanup_debugfs;
	ret = debugfs_create_file("enable_ints", S_IWUSR, visornic_debugfs_dir,
				  NULL, &debugfs_enable_ints_fops);
	if (!ret)
		goto cleanup_debugfs;

	/* create workqueue for serverdown completion */
	visornic_serverdown_workqueue =
		create_singlethread_workqueue("visornic_serverdown");
	if (!visornic_serverdown_workqueue)
		goto cleanup_debugfs;

	/* create workqueue for tx timeout reset */
	visornic_timeout_reset_workqueue =
		create_singlethread_workqueue("visornic_timeout_reset");
	if (!visornic_timeout_reset_workqueue)
		goto cleanup_workqueue;

	spin_lock_init(&dev_num_pool_lock);
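	/* dev_num_pool is treated as a bitmap of device numbers (one bit per
	 * possible device, MAXDEVICES in all), so reserve
	 * BITS_TO_LONGS(MAXDEVICES) longs of zeroed storage for it.
	 */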
	dev_num_pool = kzalloc(BITS_TO_LONGS(MAXDEVICES) *
			       sizeof(unsigned long), GFP_KERNEL);
	if (!dev_num_pool)
		goto cleanup_workqueue;

	visorbus_register_visor_driver(&visornic_driver);
	return 0;

cleanup_workqueue:
	flush_workqueue(visornic_serverdown_workqueue);
	destroy_workqueue(visornic_serverdown_workqueue);
	if (visornic_timeout_reset_workqueue) {
		flush_workqueue(visornic_timeout_reset_workqueue);
		destroy_workqueue(visornic_timeout_reset_workqueue);
	}

cleanup_debugfs:
	debugfs_remove_recursive(visornic_debugfs_dir);

	return err;
}

/**
 * visornic_cleanup - driver exit routine
 *
 * Unregister driver from the bus and free up memory.
 */
static void visornic_cleanup(void)
{
	if (visornic_serverdown_workqueue) {
		flush_workqueue(visornic_serverdown_workqueue);
		destroy_workqueue(visornic_serverdown_workqueue);
	}
	if (visornic_timeout_reset_workqueue) {
		flush_workqueue(visornic_timeout_reset_workqueue);
		destroy_workqueue(visornic_timeout_reset_workqueue);
	}
	debugfs_remove_recursive(visornic_debugfs_dir);

	visorbus_unregister_visor_driver(&visornic_driver);
	kfree(dev_num_pool);
	dev_num_pool = NULL;
}

module_init(visornic_init);
module_exit(visornic_cleanup);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("sPAR nic driver for sparlinux: ver 1.0.0.0");
MODULE_VERSION("1.0.0.0");