/*
 *  linux/drivers/message/fusion/mptlan.c
 *      IP Over Fibre Channel device driver.
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define my_VERSION      MPT_LINUX_VERSION_COMMON
#define MYNAM           "mptlan"

MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
        (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

#define MPT_LAN_TRANSACTION32_SIZE \
        (sizeof(SGETransaction32_t) - sizeof(u32))

/*
 *  Fusion MPT LAN private structures
 */

struct BufferControl {
        struct sk_buff  *skb;
        dma_addr_t      dma;
        unsigned int    len;
};

struct mpt_lan_priv {
        MPT_ADAPTER *mpt_dev;
        u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

        atomic_t buckets_out;           /* number of unused buckets on IOC */
        int bucketthresh;               /* Send more when this many left */

        int *mpt_txfidx; /* Free Tx Context list */
        int mpt_txfidx_tail;
        spinlock_t txfidx_lock;

        int *mpt_rxfidx; /* Free Rx Context list */
        int mpt_rxfidx_tail;
        spinlock_t rxfidx_lock;

        struct BufferControl *RcvCtl;   /* Receive BufferControl structs */
        struct BufferControl *SendCtl;  /* Send BufferControl structs */

        int max_buckets_out;            /* Max buckets to send to IOC */
        int tx_max_out;                 /* IOC's Tx queue len */

        u32 total_posted;
        u32 total_received;

        struct delayed_work post_buckets_task;
        struct net_device *dev;
        unsigned long post_buckets_active;
};

struct mpt_lan_ohdr {
        u16     dtype;
        u8      daddr[FC_ALEN];
        u16     stype;
        u8      saddr[FC_ALEN];
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
                       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
                                           int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
                                       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
                               LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
                                         struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN private data
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *      lan_reply - Handle all data sent from the hardware.
 *      @ioc: Pointer to MPT_ADAPTER structure
 *      @mf: Pointer to original MPT request frame (NULL if TurboReply)
 *      @reply: Pointer to MPT reply frame
 *
 *      Returns 1 indicating original alloc'd request frame ptr
 *      should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
        struct net_device *dev = ioc->netdev;
        int FreeReqFrame = 0;

        dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
                  IOC_AND_NETDEV_NAMES_s_s(dev)));

//      dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//                      mf, reply));

        if (mf == NULL) {
                u32 tmsg = CAST_PTR_TO_U32(reply);

                dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
                                IOC_AND_NETDEV_NAMES_s_s(dev),
                                tmsg));

                switch (GET_LAN_FORM(tmsg)) {

                // NOTE!  (Optimization) First case here is now caught in
                //  mptbase.c::mpt_interrupt() routine and callback here
                //  is now skipped for this case!

                case LAN_REPLY_FORM_SEND_SINGLE:
//                      dioprintk((MYNAM "/lan_reply: "
//                                "calling mpt_lan_send_reply (turbo)\n"));

                        // Potential BUG here?
                        //      FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
                        //  If/when mpt_lan_send_turbo would return 1 here,
                        //  calling routine (mptbase.c|mpt_interrupt)
                        //  would Oops because mf has already been set
                        //  to NULL.  So after return from this func,
                        //  mpt_interrupt() will attempt to put (NULL) mf ptr
                        //  item back onto its adapter FreeQ - Oops!:-(
                        //  It's Ok, since mpt_lan_send_turbo() *currently*
                        //  always returns 0, but..., just in case:

                        (void) mpt_lan_send_turbo(dev, tmsg);
                        FreeReqFrame = 0;

                        break;

                case LAN_REPLY_FORM_RECEIVE_SINGLE:
//                      dioprintk((KERN_INFO MYNAM "@lan_reply: "
//                                "rcv-Turbo = %08x\n", tmsg));
                        mpt_lan_receive_post_turbo(dev, tmsg);
                        break;

                default:
                        printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
                                "that I don't know what to do with\n");

                        /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

                        break;
                }

                return FreeReqFrame;
        }
//      msg = (u32 *) reply;
//      dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//                le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//                le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//      dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//                reply->u.hdr.Function));

        switch (reply->u.hdr.Function) {

        case MPI_FUNCTION_LAN_SEND:
        {
                LANSendReply_t *pSendRep;

                pSendRep = (LANSendReply_t *) reply;
                FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
                break;
        }

        case MPI_FUNCTION_LAN_RECEIVE:
        {
                LANReceivePostReply_t *pRecvRep;

                pRecvRep = (LANReceivePostReply_t *) reply;
                if (pRecvRep->NumberOfContexts) {
                        mpt_lan_receive_post_reply(dev, pRecvRep);
                        if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
                                FreeReqFrame = 1;
                } else
                        dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
                                  "ReceivePostReply received.\n"));
                break;
        }

        case MPI_FUNCTION_LAN_RESET:
                /* Just a default reply. Might want to check it to
                 * make sure that everything went ok.
                 */
                FreeReqFrame = 1;
                break;

        case MPI_FUNCTION_EVENT_NOTIFICATION:
        case MPI_FUNCTION_EVENT_ACK:
                /*  _EVENT_NOTIFICATION should NOT come down this path any more.
                 *  Should be routed to mpt_lan_event_process(), but just in case...
                 */
                FreeReqFrame = 1;
                break;

        default:
                printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
                        "reply that I don't know what to do with\n");

                /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
                FreeReqFrame = 1;

                break;
        }

        return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
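/*
 * IOC reset handling.  In the pre-reset phase we stop the Tx queue and
 * reclaim every Rx context index; in the post-reset phase we repost the
 * receive buckets and wake the queue again.
 */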
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
        struct net_device *dev = ioc->netdev;
        struct mpt_lan_priv *priv;

        if (dev == NULL)
                return(1);
        else
                priv = netdev_priv(dev);

        dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
                        reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
                        reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

        if (priv->mpt_rxfidx == NULL)
                return (1);

        if (reset_phase == MPT_IOC_SETUP_RESET) {
                ;       /* nothing to do for the setup phase */
        } else if (reset_phase == MPT_IOC_PRE_RESET) {
                int i;
                unsigned long flags;

                netif_stop_queue(dev);

                dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

                atomic_set(&priv->buckets_out, 0);

                /* Reset Rx Free Tail index and re-populate the queue. */
                spin_lock_irqsave(&priv->rxfidx_lock, flags);
                priv->mpt_rxfidx_tail = -1;
                for (i = 0; i < priv->max_buckets_out; i++)
                        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
                spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
        } else {
                mpt_lan_post_receive_buckets(priv);
                netif_wake_queue(dev);
        }

        return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
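/*
 * MPT event callback.  Every event is currently ignored here;
 * pEvReply->AckRequired handling is done centrally in mptbase.c.
 */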
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
        dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

        switch (le32_to_cpu(pEvReply->Event)) {
        case MPI_EVENT_NONE:                            /* 00 */
        case MPI_EVENT_LOG_DATA:                        /* 01 */
        case MPI_EVENT_STATE_CHANGE:                    /* 02 */
        case MPI_EVENT_UNIT_ATTENTION:                  /* 03 */
        case MPI_EVENT_IOC_BUS_RESET:                   /* 04 */
        case MPI_EVENT_EXT_BUS_RESET:                   /* 05 */
        case MPI_EVENT_RESCAN:                          /* 06 */
                /* Ok, do we need to do anything here? As far as
                   I can tell, this is when a new device gets added
                   to the loop. */
        case MPI_EVENT_LINK_STATUS_CHANGE:              /* 07 */
        case MPI_EVENT_LOOP_STATE_CHANGE:               /* 08 */
        case MPI_EVENT_LOGOUT:                          /* 09 */
        case MPI_EVENT_EVENT_CHANGE:                    /* 0A */
        default:
                break;
        }

        /*
         *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
         *  Do NOT do it here now!
         */

        return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
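/*
 * net_device open handler: reset the FW LAN port, allocate the free
 * Tx/Rx context lists and their BufferControl arrays, post the initial
 * receive buckets, and start the Tx queue.
 */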
static int
mpt_lan_open(struct net_device *dev)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        int i;

        if (mpt_lan_reset(dev) != 0) {
                MPT_ADAPTER *mpt_dev = priv->mpt_dev;

                printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

                if (mpt_dev->active)
                        printk ("The ioc is active. Perhaps it needs to be"
                                " reset?\n");
                else
                        printk ("The ioc is inactive, most likely in the "
                                "process of being reset. Please try again in "
                                "a moment.\n");
        }

        priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
        if (priv->mpt_txfidx == NULL)
                goto out;
        priv->mpt_txfidx_tail = -1;

        priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
                                GFP_KERNEL);
        if (priv->SendCtl == NULL)
                goto out_mpt_txfidx;
        for (i = 0; i < priv->tx_max_out; i++)
                priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

        dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

        priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
                                   GFP_KERNEL);
        if (priv->mpt_rxfidx == NULL)
                goto out_SendCtl;
        priv->mpt_rxfidx_tail = -1;

        priv->RcvCtl = kcalloc(priv->max_buckets_out,
                               sizeof(struct BufferControl),
                               GFP_KERNEL);
        if (priv->RcvCtl == NULL)
                goto out_mpt_rxfidx;
        for (i = 0; i < priv->max_buckets_out; i++)
                priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/    for (i = 0; i < priv->tx_max_out; i++)
/**/            dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/    dlprintk(("\n"));

        dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

        mpt_lan_post_receive_buckets(priv);
        printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev));

        if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
                printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
                        " Notifications. This is a bad thing! We're not going "
                        "to go ahead, but I'd be leery of system stability at "
                        "this point.\n");
        }

        netif_start_queue(dev);
        dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

        return 0;
out_mpt_rxfidx:
        kfree(priv->mpt_rxfidx);
        priv->mpt_rxfidx = NULL;
out_SendCtl:
        kfree(priv->SendCtl);
        priv->SendCtl = NULL;
out_mpt_txfidx:
        kfree(priv->mpt_txfidx);
        priv->mpt_txfidx = NULL;
out:    return -ENOMEM;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
        MPT_FRAME_HDR *mf;
        LANResetRequest_t *pResetReq;
        struct mpt_lan_priv *priv = netdev_priv(dev);

        mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

        if (mf == NULL) {
/*              dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
                "Unable to allocate a request frame.\n"));
*/
                return -1;
        }

        pResetReq = (LANResetRequest_t *) mf;

        pResetReq->Function     = MPI_FUNCTION_LAN_RESET;
        pResetReq->ChainOffset  = 0;
        pResetReq->Reserved     = 0;
        pResetReq->PortNumber   = priv->pnum;
        pResetReq->MsgFlags     = 0;
        pResetReq->Reserved2    = 0;

        mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
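/*
 * net_device stop handler: reset the FW port so it returns outstanding
 * buckets, wait up to two seconds for them to come back, then unmap and
 * free all remaining Rx/Tx buffers and the context lists.
 */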
static int
mpt_lan_close(struct net_device *dev)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        unsigned long timeout;
        int i;

        dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

        mpt_event_deregister(LanCtx);

        dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
                  "since driver was loaded, %d still out\n",
                  priv->total_posted,atomic_read(&priv->buckets_out)));

        netif_stop_queue(dev);

        mpt_lan_reset(dev);

        timeout = jiffies + 2 * HZ;
        while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
                schedule_timeout_interruptible(1);

        for (i = 0; i < priv->max_buckets_out; i++) {
                if (priv->RcvCtl[i].skb != NULL) {
/**/                    dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/                              "is still out\n", i));
                        pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
                                         priv->RcvCtl[i].len,
                                         PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(priv->RcvCtl[i].skb);
                }
        }

        kfree(priv->RcvCtl);
        kfree(priv->mpt_rxfidx);

        for (i = 0; i < priv->tx_max_out; i++) {
                if (priv->SendCtl[i].skb != NULL) {
                        pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
                                         priv->SendCtl[i].len,
                                         PCI_DMA_TODEVICE);
                        dev_kfree_skb(priv->SendCtl[i].skb);
                }
        }

        kfree(priv->SendCtl);
        kfree(priv->mpt_txfidx);

        atomic_set(&priv->buckets_out, 0);

        printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev));

        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
{
        if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;

        if (mpt_dev->active) {
                dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
                netif_wake_queue(dev);
        }
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
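/*
 * Tx completion via turbo reply: unmap and free the sent skb and return
 * its context index to the free Tx list.
 */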
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        struct sk_buff *sent;
        unsigned long flags;
        u32 ctx;

        ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
        sent = priv->SendCtl[ctx].skb;

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += sent->len;

        dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        __func__, sent));

        priv->SendCtl[ctx].skb = NULL;
        pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
                         priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
        dev_kfree_skb_irq(sent);

        spin_lock_irqsave(&priv->txfidx_lock, flags);
        priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
        spin_unlock_irqrestore(&priv->txfidx_lock, flags);

        netif_wake_queue(dev);
        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
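/*
 * Tx completion via a full LANSendReply: account the result from
 * IOCStatus, then unmap and free every completed skb and recycle its
 * context index.
 */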
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        struct sk_buff *sent;
        unsigned long flags;
        int FreeReqFrame = 0;
        u32 *pContext;
        u32 ctx;
        u8 count;

        count = pSendRep->NumberOfContexts;

        dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
                 le16_to_cpu(pSendRep->IOCStatus)));

        /* Add check for Loginfo Flag in IOCStatus */

        switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
        case MPI_IOCSTATUS_SUCCESS:
                dev->stats.tx_packets += count;
                break;

        case MPI_IOCSTATUS_LAN_CANCELED:
        case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
                break;

        case MPI_IOCSTATUS_INVALID_SGL:
                dev->stats.tx_errors += count;
                printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
                                IOC_AND_NETDEV_NAMES_s_s(dev));
                goto out;

        default:
                dev->stats.tx_errors += count;
                break;
        }

        pContext = &pSendRep->BufferContext;

        spin_lock_irqsave(&priv->txfidx_lock, flags);
        while (count > 0) {
                ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

                sent = priv->SendCtl[ctx].skb;
                dev->stats.tx_bytes += sent->len;

                dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
                                IOC_AND_NETDEV_NAMES_s_s(dev),
                                __func__, sent));

                priv->SendCtl[ctx].skb = NULL;
                pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
                                 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(sent);

                priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

                pContext++;
                count--;
        }
        spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
        if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
                FreeReqFrame = 1;

        netif_wake_queue(dev);
        return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
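/*
 * ndo_start_xmit handler: grab a free Tx context and a message frame,
 * DMA-map the skb, and build a LANSend request consisting of one
 * transaction context element (our ctx), the NAA/MAC destination
 * details, and a single 64-bit simple SGE covering the whole packet.
 */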
static int
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        MPT_FRAME_HDR *mf;
        LANSendRequest_t *pSendReq;
        SGETransaction32_t *pTrans;
        SGESimple64_t *pSimple;
        const unsigned char *mac;
        dma_addr_t dma;
        unsigned long flags;
        int ctx;
        u16 cur_naa = 0x1000;

        dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
                        __func__, skb));

        spin_lock_irqsave(&priv->txfidx_lock, flags);
        if (priv->mpt_txfidx_tail < 0) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&priv->txfidx_lock, flags);

                printk (KERN_ERR "%s: no tx context available: %d\n",
                        __func__, priv->mpt_txfidx_tail);
                return NETDEV_TX_BUSY;
        }

        mf = mpt_get_msg_frame(LanCtx, mpt_dev);
        if (mf == NULL) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&priv->txfidx_lock, flags);

                printk (KERN_ERR "%s: Unable to alloc request frame\n",
                        __func__);
                return NETDEV_TX_BUSY;
        }

        ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
        spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//      dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//                      IOC_AND_NETDEV_NAMES_s_s(dev)));

        pSendReq = (LANSendRequest_t *) mf;

        /* Set the mac.raw pointer, since this apparently isn't getting
         * done before we get the skb. Pull the data pointer past the mac data.
         */
        skb_reset_mac_header(skb);
        skb_pull(skb, 12);

        dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
                             PCI_DMA_TODEVICE);

        priv->SendCtl[ctx].skb = skb;
        priv->SendCtl[ctx].dma = dma;
        priv->SendCtl[ctx].len = skb->len;

        /* Message Header */
        pSendReq->Reserved    = 0;
        pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
        pSendReq->ChainOffset = 0;
        pSendReq->Reserved2   = 0;
        pSendReq->MsgFlags    = 0;
        pSendReq->PortNumber  = priv->pnum;

        /* Transaction Context Element */
        pTrans = (SGETransaction32_t *) pSendReq->SG_List;

        /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
        pTrans->ContextSize   = sizeof(u32);
        pTrans->DetailsLength = 2 * sizeof(u32);
        pTrans->Flags         = 0;
        pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//      dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//                      IOC_AND_NETDEV_NAMES_s_s(dev),
//                      ctx, skb, skb->data));

        mac = skb_mac_header(skb);

        pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
                                                    (mac[0] <<  8) |
                                                    (mac[1] <<  0));
        pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
                                                    (mac[3] << 16) |
                                                    (mac[4] <<  8) |
                                                    (mac[5] <<  0));

        pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

        /* If we ever decide to send more than one Simple SGE per LANSend, then
           we will need to make sure that LAST_ELEMENT only gets set on the
           last one. Otherwise, bad voodoo and evil funkiness will commence. */
        pSimple->FlagsLength = cpu_to_le32(
                        ((MPI_SGE_FLAGS_LAST_ELEMENT |
                          MPI_SGE_FLAGS_END_OF_BUFFER |
                          MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                          MPI_SGE_FLAGS_SYSTEM_ADDRESS |
                          MPI_SGE_FLAGS_HOST_TO_IOC |
                          MPI_SGE_FLAGS_64_BIT_ADDRESSING |
                          MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
                        skb->len);
        pSimple->Address.Low = cpu_to_le32((u32) dma);
        if (sizeof(dma_addr_t) > sizeof(u32))
                pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
        else
                pSimple->Address.High = 0;

        mpt_put_msg_frame (LanCtx, mpt_dev, mf);
        dev->trans_start = jiffies;

        dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        le32_to_cpu(pSimple->FlagsLength)));

        return NETDEV_TX_OK;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 */
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);

        if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
                if (priority) {
                        schedule_delayed_work(&priv->post_buckets_task, 0);
                } else {
                        schedule_delayed_work(&priv->post_buckets_task, 1);
                        dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
                                   "timer.\n"));
                }
                dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
                           IOC_AND_NETDEV_NAMES_s_s(dev) ));
        }
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
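/*
 * Hand a received skb up to the network stack and schedule more bucket
 * posting if the number of buckets at the IOC is running low.
 */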
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);

        skb->protocol = mpt_lan_type_trans(skb, dev);

        dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
                 "delivered to upper level.\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        skb->dev = dev;
        netif_rx(skb);

        dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
                 atomic_read(&priv->buckets_out)));

        if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
                mpt_lan_wake_post_buckets_task(dev, 1);

        dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
                  "remaining, %d received back since sod\n",
                  atomic_read(&priv->buckets_out), priv->total_received));

        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
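/*
 * Rx completion via turbo reply: one bucket holds the entire packet.
 * Short packets are copied into a fresh skb so the bucket can be
 * reused; longer ones give up the bucket's skb to the stack.
 */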
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        struct sk_buff *skb, *old_skb;
        unsigned long flags;
        u32 ctx, len;

        ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
        skb = priv->RcvCtl[ctx].skb;

        len = GET_LAN_PACKET_LENGTH(tmsg);

        if (len < MPT_LAN_RX_COPYBREAK) {
                old_skb = skb;

                skb = (struct sk_buff *)dev_alloc_skb(len);
                if (!skb) {
                        printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
                                        IOC_AND_NETDEV_NAMES_s_s(dev),
                                        __FILE__, __LINE__);
                        return -ENOMEM;
                }

                pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                                            priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

                skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

                pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                                               priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
                goto out;
        }

        skb_put(skb, len);

        priv->RcvCtl[ctx].skb = NULL;

        pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                         priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
        spin_lock_irqsave(&priv->rxfidx_lock, flags);
        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

        atomic_dec(&priv->buckets_out);
        priv->total_received++;

        return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
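/*
 * The FW has returned buckets unused (e.g. after a LanReset): unmap and
 * free each bucket's skb and recycle the context indices.
 */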
static int
mpt_lan_receive_post_free(struct net_device *dev,
                          LANReceivePostReply_t *pRecvRep)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        unsigned long flags;
        struct sk_buff *skb;
        u32 ctx;
        int count;
        int i;

        count = pRecvRep->NumberOfContexts;

/**/    dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
                  "IOC returned %d buckets, freeing them...\n", count));

        spin_lock_irqsave(&priv->rxfidx_lock, flags);
        for (i = 0; i < count; i++) {
                ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

                skb = priv->RcvCtl[ctx].skb;

//              dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//                              IOC_AND_NETDEV_NAMES_s_s(dev)));
//              dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//                              priv, &(priv->buckets_out)));
//              dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

                priv->RcvCtl[ctx].skb = NULL;
                pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                                 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);

                priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
        }
        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

        atomic_sub(count, &priv->buckets_out);

//      for (i = 0; i < priv->max_buckets_out; i++)
//              if (priv->RcvCtl[i].skb != NULL)
//                      dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//                                "is still out\n", i));

/*      dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
                  count));
*/
/**/    dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/              "remaining, %d received back since sod.\n",
/**/              atomic_read(&priv->buckets_out), priv->total_received));

        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
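/*
 * Rx completion via a full ReceivePostReply.  A packet may span several
 * buckets, in which case the fragments are concatenated into a freshly
 * allocated skb.  Also cross-checks the FW's BucketsRemaining count
 * against our buckets_out and issues a LanReset on a large mismatch.
 */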
static int
mpt_lan_receive_post_reply(struct net_device *dev,
                           LANReceivePostReply_t *pRecvRep)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        struct sk_buff *skb, *old_skb;
        unsigned long flags;
        u32 len, ctx, offset;
        u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
        int count;
        int i, l;

        dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
        dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
                 le16_to_cpu(pRecvRep->IOCStatus)));

        if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
                                                MPI_IOCSTATUS_LAN_CANCELED)
                return mpt_lan_receive_post_free(dev, pRecvRep);

        len = le32_to_cpu(pRecvRep->PacketLength);
        if (len == 0) {
                printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
                        "ReceivePostReply w/ PacketLength zero!\n",
                                IOC_AND_NETDEV_NAMES_s_s(dev));
                printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
                                pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
                return -1;
        }

        ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
        count  = pRecvRep->NumberOfContexts;
        skb    = priv->RcvCtl[ctx].skb;

        offset = le32_to_cpu(pRecvRep->PacketOffset);
//      if (offset != 0) {
//              printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//                      "w/ PacketOffset %u\n",
//                              IOC_AND_NETDEV_NAMES_s_s(dev),
//                              offset);
//      }

        dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        offset, len));

        if (count > 1) {
                int szrem = len;

//              dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//                      "for single packet, concatenating...\n",
//                              IOC_AND_NETDEV_NAMES_s_s(dev)));

                skb = (struct sk_buff *)dev_alloc_skb(len);
                if (!skb) {
                        printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
                                        IOC_AND_NETDEV_NAMES_s_s(dev),
                                        __FILE__, __LINE__);
                        return -ENOMEM;
                }

                spin_lock_irqsave(&priv->rxfidx_lock, flags);
                for (i = 0; i < count; i++) {

                        ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
                        old_skb = priv->RcvCtl[ctx].skb;

                        l = priv->RcvCtl[ctx].len;
                        if (szrem < l)
                                l = szrem;

//                      dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//                                      IOC_AND_NETDEV_NAMES_s_s(dev),
//                                      i, l));

                        pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
                                                    priv->RcvCtl[ctx].dma,
                                                    priv->RcvCtl[ctx].len,
                                                    PCI_DMA_FROMDEVICE);
                        skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

                        pci_dma_sync_single_for_device(mpt_dev->pcidev,
                                                       priv->RcvCtl[ctx].dma,
                                                       priv->RcvCtl[ctx].len,
                                                       PCI_DMA_FROMDEVICE);

                        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
                        szrem -= l;
                }
                spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

        } else if (len < MPT_LAN_RX_COPYBREAK) {

                old_skb = skb;

                skb = (struct sk_buff *)dev_alloc_skb(len);
                if (!skb) {
                        printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
                                        IOC_AND_NETDEV_NAMES_s_s(dev),
                                        __FILE__, __LINE__);
                        return -ENOMEM;
                }

                pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
                                            priv->RcvCtl[ctx].dma,
                                            priv->RcvCtl[ctx].len,
                                            PCI_DMA_FROMDEVICE);

                skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

                pci_dma_sync_single_for_device(mpt_dev->pcidev,
                                               priv->RcvCtl[ctx].dma,
                                               priv->RcvCtl[ctx].len,
                                               PCI_DMA_FROMDEVICE);

                spin_lock_irqsave(&priv->rxfidx_lock, flags);
                priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
                spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

        } else {
                spin_lock_irqsave(&priv->rxfidx_lock, flags);

                priv->RcvCtl[ctx].skb = NULL;

                pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                                 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
                priv->RcvCtl[ctx].dma = 0;

                priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
                spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

                skb_put(skb, len);
        }

        atomic_sub(count, &priv->buckets_out);
        priv->total_received += count;

        if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
                printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
                        "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
                                IOC_AND_NETDEV_NAMES_s_s(dev),
                                priv->mpt_rxfidx_tail,
                                MPT_LAN_MAX_BUCKETS_OUT);

                return -1;
        }

        if (remaining == 0)
                printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
                        "(priv->buckets_out = %d)\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        atomic_read(&priv->buckets_out));
        else if (remaining < 10)
                printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
                        "(priv->buckets_out = %d)\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        remaining, atomic_read(&priv->buckets_out));

        if ((remaining < priv->bucketthresh) &&
            ((atomic_read(&priv->buckets_out) - remaining) >
             MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

                printk (KERN_WARNING MYNAM " Mismatch between driver's "
                        "buckets_out count and fw's BucketsRemaining "
                        "count has crossed the threshold, issuing a "
                        "LanReset to clear the fw's hashtable. You may "
                        "want to check your /var/log/messages for \"CRC "
                        "error\" event notifications.\n");

                mpt_lan_reset(dev);
                mpt_lan_wake_post_buckets_task(dev, 0);
        }

        return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */
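/*
 * Replenish the IOC's supply of receive buckets: for every free Rx
 * context, (re)allocate and DMA-map an skb of the current bucket size,
 * and chain the buckets into LANReceivePost requests, as many per
 * message frame as will fit.
 */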
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
        struct net_device *dev = priv->dev;
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        MPT_FRAME_HDR *mf;
        LANReceivePostRequest_t *pRecvReq;
        SGETransaction32_t *pTrans;
        SGESimple64_t *pSimple;
        struct sk_buff *skb;
        dma_addr_t dma;
        u32 curr, buckets, count, max;
        u32 len = (dev->mtu + dev->hard_header_len + 4);
        unsigned long flags;
        int i;

        curr = atomic_read(&priv->buckets_out);
        buckets = (priv->max_buckets_out - curr);

        dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        __func__, buckets, curr));

        max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
              (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

        while (buckets) {
                mf = mpt_get_msg_frame(LanCtx, mpt_dev);
                if (mf == NULL) {
                        printk (KERN_ERR "%s: Unable to alloc request frame\n",
                                __func__);
                        dioprintk((KERN_ERR "%s: %u buckets remaining\n",
                                   __func__, buckets));
                        goto out;
                }
                pRecvReq = (LANReceivePostRequest_t *) mf;

                i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
                mpt_dev->RequestNB[i] = 0;
                count = buckets;
                if (count > max)
                        count = max;

                pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
                pRecvReq->ChainOffset = 0;
                pRecvReq->MsgFlags    = 0;
                pRecvReq->PortNumber  = priv->pnum;

                pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
                pSimple = NULL;

                for (i = 0; i < count; i++) {
                        int ctx;

                        spin_lock_irqsave(&priv->rxfidx_lock, flags);
                        if (priv->mpt_rxfidx_tail < 0) {
                                printk (KERN_ERR "%s: Can't alloc context\n",
                                        __func__);
                                spin_unlock_irqrestore(&priv->rxfidx_lock,
                                                       flags);
                                break;
                        }

                        ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

                        skb = priv->RcvCtl[ctx].skb;
                        if (skb && (priv->RcvCtl[ctx].len != len)) {
                                pci_unmap_single(mpt_dev->pcidev,
                                                 priv->RcvCtl[ctx].dma,
                                                 priv->RcvCtl[ctx].len,
                                                 PCI_DMA_FROMDEVICE);
                                dev_kfree_skb(priv->RcvCtl[ctx].skb);
                                skb = priv->RcvCtl[ctx].skb = NULL;
                        }

                        if (skb == NULL) {
                                skb = dev_alloc_skb(len);
                                if (skb == NULL) {
                                        printk (KERN_WARNING
                                                MYNAM "/%s: Can't alloc skb\n",
                                                __func__);
                                        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
                                        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
                                        break;
                                }

                                dma = pci_map_single(mpt_dev->pcidev, skb->data,
                                                     len, PCI_DMA_FROMDEVICE);

                                priv->RcvCtl[ctx].skb = skb;
                                priv->RcvCtl[ctx].dma = dma;
                                priv->RcvCtl[ctx].len = len;
                        }

                        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

                        pTrans->ContextSize   = sizeof(u32);
                        pTrans->DetailsLength = 0;
                        pTrans->Flags         = 0;
                        pTrans->TransactionContext[0] = cpu_to_le32(ctx);

                        pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

                        pSimple->FlagsLength = cpu_to_le32(
                                ((MPI_SGE_FLAGS_END_OF_BUFFER |
                                  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                                  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
                        pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
                        if (sizeof(dma_addr_t) > sizeof(u32))
                                pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
                        else
                                pSimple->Address.High = 0;

                        pTrans = (SGETransaction32_t *) (pSimple + 1);
                }

                if (pSimple == NULL) {
/**/                    printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/                            __func__);
                        mpt_free_msg_frame(mpt_dev, mf);
                        goto out;
                }

                pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

                pRecvReq->BucketCount = cpu_to_le32(i);

/*      printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *      for (i = 0; i < j + 2; i ++)
 *          printk (" %08x", le32_to_cpu(msg[i]));
 *      printk ("\n");
 */

                mpt_put_msg_frame(LanCtx, mpt_dev, mf);

                priv->total_posted += i;
                buckets -= i;
                atomic_add(i, &priv->buckets_out);
        }

out:
        dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
                  __func__, buckets, atomic_read(&priv->buckets_out)));
        dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
                  __func__, priv->total_posted, priv->total_received));

        clear_bit(0, &priv->post_buckets_active);
}

static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
        mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
                                                  post_buckets_task.work));
}

static const struct net_device_ops mpt_netdev_ops = {
        .ndo_open       = mpt_lan_open,
        .ndo_stop       = mpt_lan_close,
        .ndo_start_xmit = mpt_lan_sdu_send,
        .ndo_change_mtu = mpt_lan_change_mtu,
        .ndo_tx_timeout = mpt_lan_tx_timeout,
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
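/*
 * Allocate and register one FC net_device for an IOC port: size the
 * bucket and Tx limits, pull the hardware address out of the cached
 * LANPage1 data, and wire up mpt_netdev_ops.
 */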
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
        struct net_device *dev;
        struct mpt_lan_priv *priv;
        u8 HWaddr[FC_ALEN], *a;

        dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
        if (!dev)
                return NULL;

        dev->mtu = MPT_LAN_MTU;

        priv = netdev_priv(dev);

        priv->dev = dev;
        priv->mpt_dev = mpt_dev;
        priv->pnum = pnum;

        INIT_DELAYED_WORK(&priv->post_buckets_task,
                          mpt_lan_post_receive_buckets_work);
        priv->post_buckets_active = 0;

        dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
                        __LINE__, dev->mtu + dev->hard_header_len + 4));

        atomic_set(&priv->buckets_out, 0);
        priv->total_posted = 0;
        priv->total_received = 0;
        priv->max_buckets_out = max_buckets_out;
        if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
                priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

        dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
                        __LINE__,
                        mpt_dev->pfacts[0].MaxLanBuckets,
                        max_buckets_out,
                        priv->max_buckets_out));

        priv->bucketthresh = priv->max_buckets_out * 2 / 3;
        spin_lock_init(&priv->txfidx_lock);
        spin_lock_init(&priv->rxfidx_lock);

        /*  Grab pre-fetched LANPage1 stuff. :-) */
        a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

        HWaddr[0] = a[5];
        HWaddr[1] = a[4];
        HWaddr[2] = a[3];
        HWaddr[3] = a[2];
        HWaddr[4] = a[1];
        HWaddr[5] = a[0];

        dev->addr_len = FC_ALEN;
        memcpy(dev->dev_addr, HWaddr, FC_ALEN);
        memset(dev->broadcast, 0xff, FC_ALEN);

        /* The Tx queue is 127 deep on the 909.
         * Give ourselves some breathing room.
         */
        priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
                            tx_max_out_p : MPT_TX_MAX_OUT_LIM;

        dev->netdev_ops = &mpt_netdev_ops;
        dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

        dlprintk((KERN_INFO MYNAM ": Finished registering dev "
                "and setting initial values\n"));

        if (register_netdev(dev) != 0) {
                free_netdev(dev);
                dev = NULL;
        }
        return dev;
}

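/*
 * PCI probe callback from mptbase: register a LAN net_device on the
 * first IOC port that advertises the LAN protocol.
 */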
static int
mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
        struct net_device *dev;
        int i;

        for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
                printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
                       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
                       ioc->name, ioc->pfacts[i].PortNumber,
                       ioc->pfacts[i].ProtocolFlags,
                       MPT_PROTOCOL_FLAGS_c_c_c_c(
                               ioc->pfacts[i].ProtocolFlags));

                if (!(ioc->pfacts[i].ProtocolFlags &
                                        MPI_PORTFACTS_PROTOCOL_LAN)) {
                        printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
                               "seems to be disabled on this adapter port!\n",
                               ioc->name);
                        continue;
                }

                dev = mpt_register_lan_device(ioc, i);
                if (!dev) {
                        printk(KERN_ERR MYNAM ": %s: Unable to register "
                               "port%d as a LAN device\n", ioc->name,
                               ioc->pfacts[i].PortNumber);
                        continue;
                }

                printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
                       "registered as '%s'\n", ioc->name, dev->name);
                printk(KERN_INFO MYNAM ": %s/%s: "
                       "LanAddr = %pM\n",
                       IOC_AND_NETDEV_NAMES_s_s(dev),
                       dev->dev_addr);

                ioc->netdev = dev;

                return 0;
        }

        return -ENODEV;
}

static void
mptlan_remove(struct pci_dev *pdev)
{
        MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
        struct net_device *dev = ioc->netdev;

        if(dev != NULL) {
                unregister_netdev(dev);
                free_netdev(dev);
        }
}

static struct mpt_pci_driver mptlan_driver = {
        .probe  = mptlan_probe,
        .remove = mptlan_remove,
};

static int __init mpt_lan_init (void)
{
        show_mptmod_ver(LANAME, LANVER);

        LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
                                "lan_reply");
        if (LanCtx <= 0) {
                printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
                return -EBUSY;
        }

        dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));

        if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
                printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
                       "handler with mptbase! The world is at an end! "
                       "Everything is fading to black! Goodbye.\n");
                return -EBUSY;
        }

        dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));

        mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
        return 0;
}

static void __exit mpt_lan_exit(void)
{
        mpt_device_driver_deregister(MPTLAN_DRIVER);
        mpt_reset_deregister(LanCtx);

        if (LanCtx) {
                mpt_deregister(LanCtx);
                LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
        }
}

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
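/*
 * FC LAN analogue of eth_type_trans(): set the mac header and packet
 * type, work around the firmware broadcast byte-swap bug, and strip the
 * LLC/SNAP header from IP and ARP frames, returning the protocol ID.
 */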
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
        struct fcllc *fcllc;

        skb_reset_mac_header(skb);
        skb_pull(skb, sizeof(struct mpt_lan_ohdr));

        if (fch->dtype == htons(0xffff)) {
                u32 *p = (u32 *) fch;

                swab32s(p + 0);
                swab32s(p + 1);
                swab32s(p + 2);
                swab32s(p + 3);

                printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
                                NETDEV_PTR_TO_IOC_NAME_s(dev));
                printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
                                fch->saddr);
        }

        if (*fch->daddr & 1) {
                if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
                        skb->pkt_type = PACKET_BROADCAST;
                } else {
                        skb->pkt_type = PACKET_MULTICAST;
                }
        } else {
                if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
                        skb->pkt_type = PACKET_OTHERHOST;
                } else {
                        skb->pkt_type = PACKET_HOST;
                }
        }

        fcllc = (struct fcllc *)skb->data;

        /* Strip the SNAP header from ARP packets since we don't
         * pass them through to the 802.2/SNAP layers.
         */
        if (fcllc->dsap == EXTENDED_SAP &&
                (fcllc->ethertype == htons(ETH_P_IP) ||
                 fcllc->ethertype == htons(ETH_P_ARP))) {
                skb_pull(skb, sizeof(struct fcllc));
                return fcllc->ethertype;
        }

        return htons(ETH_P_802_2);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/