/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */
#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

static int ipath_verbs_registered;

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);
static void (*verbs_timer_cb)(void *);
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_intr)
                ret = layer_intr(dd->ipath_layer.l_arg, arg);

        return ret;
}

int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
        int ret;

        mutex_lock(&ipath_layer_mutex);

        ret = __ipath_layer_intr(dd, arg);

        mutex_unlock(&ipath_layer_mutex);

        return ret;
}
int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
                      struct sk_buff *skb)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_rcv)
                ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

        return ret;
}
int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_rcv_lid)
                ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

        return ret;
}
int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
{
        int ret = -ENODEV;

        if (dd->verbs_layer.l_arg && verbs_piobufavail)
                ret = verbs_piobufavail(dd->verbs_layer.l_arg);

        return ret;
}
int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
                      u32 tlen)
{
        int ret = -ENODEV;

        if (dd->verbs_layer.l_arg && verbs_rcv) {
                verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
                ret = 0;
        }

        return ret;
}
int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
        u32 lstate;
        int ret;

        switch (newstate) {
        case IPATH_IB_LINKDOWN:
                ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
                                    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
                /* don't wait */
                ret = 0;
                goto bail;

        case IPATH_IB_LINKDOWN_SLEEP:
                ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
                                    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
                /* don't wait */
                ret = 0;
                goto bail;

        case IPATH_IB_LINKDOWN_DISABLE:
                ipath_set_ib_lstate(dd,
                                    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
                                    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
                /* don't wait */
                ret = 0;
                goto bail;

        case IPATH_IB_LINKINIT:
                if (dd->ipath_flags & IPATH_LINKINIT) {
                        ret = 0;
                        goto bail;
                }
                ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
                                    INFINIPATH_IBCC_LINKCMD_SHIFT);
                lstate = IPATH_LINKINIT;
                break;

        case IPATH_IB_LINKARM:
                if (dd->ipath_flags & IPATH_LINKARMED) {
                        ret = 0;
                        goto bail;
                }
                if (!(dd->ipath_flags &
                      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
                        ret = -EINVAL;
                        goto bail;
                }
                ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
                                    INFINIPATH_IBCC_LINKCMD_SHIFT);
                /*
                 * Since the port can transition to ACTIVE by receiving
                 * a non VL 15 packet, wait for either state.
                 */
                lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
                break;

        case IPATH_IB_LINKACTIVE:
                if (dd->ipath_flags & IPATH_LINKACTIVE) {
                        ret = 0;
                        goto bail;
                }
                if (!(dd->ipath_flags & IPATH_LINKARMED)) {
                        ret = -EINVAL;
                        goto bail;
                }
                ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
                                    INFINIPATH_IBCC_LINKCMD_SHIFT);
                lstate = IPATH_LINKACTIVE;
                break;

        default:
                ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
                ret = -EINVAL;
                goto bail;
        }
        ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
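/*
 * Illustrative sketch (not in the original source): an SMA or other
 * management caller brings the link up in two steps, e.g.
 *
 *      ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM);
 *      if (!ret)
 *              ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
 *
 * Each call blocks in ipath_wait_linkstate() for up to 2000 ms waiting
 * for the IBC to reach the requested state; the LINKDOWN variants
 * return without waiting.
 */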
/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * we can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
{
        u32 piosize;
        int changed = 0;
        int ret;

        /*
         * mtu is IB data payload max.  It's the largest power of 2 less
         * than piosize (or even larger, since it only really controls the
         * largest we can receive; we can send the max of the mtu and
         * piosize).  We check that it's one of the valid IB sizes.
         */
        if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
            arg != 4096) {
                ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
                ret = -EINVAL;
                goto bail;
        }
        if (dd->ipath_ibmtu == arg) {
                ret = 0;        /* same as current */
                goto bail;
        }

        piosize = dd->ipath_ibmaxlen;
        dd->ipath_ibmtu = arg;

        if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
                /* Only if it's not the initial value (or reset to it) */
                if (piosize != dd->ipath_init_ibmaxlen) {
                        dd->ipath_ibmaxlen = piosize;
                        changed = 1;
                }
        } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
                piosize = arg + IPATH_PIO_MAXIBHDR;
                ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
                           "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
                           arg);
                dd->ipath_ibmaxlen = piosize;
                changed = 1;
        }

        if (changed) {
                /*
                 * set the IBC maxpktlength to the size of our pio
                 * buffers in words
                 */
                u64 ibc = dd->ipath_ibcctrl;
                ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
                         INFINIPATH_IBCC_MAXPKTLEN_SHIFT);

                piosize = piosize - 2 * sizeof(u32);    /* ignore pbc */
                dd->ipath_ibmaxlen = piosize;
                piosize /= sizeof(u32); /* in words */
                /*
                 * for ICRC, which we only send in diag test pkt mode, and
                 * we don't need to worry about that for mtu
                 */
                piosize += 1;

                ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
                dd->ipath_ibcctrl = ibc;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
                                 dd->ipath_ibcctrl);
                dd->ipath_f_tidtemplate(dd);
        }

        ret = 0;

bail:
        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
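/*
 * Worked example (added for illustration): with arg = 2048 and the PIO
 * buffers large enough, ipath_ibmaxlen becomes 2048 + IPATH_PIO_MAXIBHDR.
 * The IBC maxpktlen field programmed above is then that value minus
 * 2 * sizeof(u32) for the PBC, divided by sizeof(u32) to convert bytes
 * to words, plus one extra word to allow for the ICRC.
 */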
int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
        ipath_stats.sps_lid[dd->ipath_unit] = arg;
        dd->ipath_lid = arg;
        dd->ipath_lmc = lmc;

        mutex_lock(&ipath_layer_mutex);

        if (dd->ipath_layer.l_arg && layer_intr)
                layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

        mutex_unlock(&ipath_layer_mutex);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
{
        /* XXX - need to inform anyone who cares this just happened. */
        dd->ipath_guid = guid;
        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
{
        return dd->ipath_guid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_guid);

u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
{
        return dd->ipath_nguid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);

u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
{
        return dd->ipath_majrev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);

u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
{
        return dd->ipath_minrev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);

u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
{
        return dd->ipath_pcirev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);

u32 ipath_layer_get_flags(struct ipath_devdata *dd)
{
        return dd->ipath_flags;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_flags);

struct device *ipath_layer_get_device(struct ipath_devdata *dd)
{
        return &dd->pcidev->dev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_device);

u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
{
        return dd->ipath_deviceid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);

u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
{
        return dd->ipath_vendorid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);

u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
{
        return dd->ipath_lastibcstat;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);

u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
{
        return dd->ipath_ibmtu;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
void ipath_layer_add(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (layer_add_one)
                dd->ipath_layer.l_arg =
                        layer_add_one(dd->ipath_unit, dd);

        if (verbs_add_one)
                dd->verbs_layer.l_arg =
                        verbs_add_one(dd->ipath_unit, dd);

        mutex_unlock(&ipath_layer_mutex);
}
void ipath_layer_remove(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (dd->ipath_layer.l_arg && layer_remove_one) {
                layer_remove_one(dd->ipath_layer.l_arg);
                dd->ipath_layer.l_arg = NULL;
        }

        if (dd->verbs_layer.l_arg && verbs_remove_one) {
                verbs_remove_one(dd->verbs_layer.l_arg);
                dd->verbs_layer.l_arg = NULL;
        }

        mutex_unlock(&ipath_layer_mutex);
}
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
                         void (*l_remove)(void *),
                         int (*l_intr)(void *, u32),
                         int (*l_rcv)(void *, void *, struct sk_buff *),
                         u16 l_rcv_opcode,
                         int (*l_rcv_lid)(void *, void *))
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);

        layer_add_one = l_add;
        layer_remove_one = l_remove;
        layer_intr = l_intr;
        layer_rcv = l_rcv;
        layer_rcv_lid = l_rcv_lid;
        ipath_layer_rcv_opcode = l_rcv_opcode;

        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (!(dd->ipath_flags & IPATH_INITTED))
                        continue;

                if (dd->ipath_layer.l_arg)
                        continue;

                if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
                        *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

                spin_unlock_irqrestore(&ipath_devs_lock, flags);
                dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
                spin_lock_irqsave(&ipath_devs_lock, flags);
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);
        mutex_unlock(&ipath_layer_mutex);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);
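/*
 * Illustrative sketch (not part of the original file): a layered driver
 * registers its callbacks once at module init; the names and opcode
 * value below are hypothetical.
 *
 *      static void *my_add(int unit, struct ipath_devdata *dd)
 *      {
 *              return my_alloc_state(unit, dd);   (becomes l_arg)
 *      }
 *
 *      ret = ipath_layer_register(my_add, my_remove, my_intr, my_rcv,
 *                                 MY_RCV_OPCODE, my_rcv_lid);
 *
 * Devices that are already initialized get their l_add callback invoked
 * from within the register call, as the loop above shows.
 */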
void ipath_layer_unregister(void)
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);
        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (dd->ipath_layer.l_arg && layer_remove_one) {
                        spin_unlock_irqrestore(&ipath_devs_lock, flags);
                        layer_remove_one(dd->ipath_layer.l_arg);
                        spin_lock_irqsave(&ipath_devs_lock, flags);
                        dd->ipath_layer.l_arg = NULL;
                }
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);

        layer_add_one = NULL;
        layer_remove_one = NULL;
        layer_intr = NULL;
        layer_rcv = NULL;
        layer_rcv_lid = NULL;

        mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);
static void __ipath_verbs_timer(unsigned long arg)
{
        struct ipath_devdata *dd = (struct ipath_devdata *) arg;

        /*
         * If port 0 receive packet interrupts are not available, or
         * can be missed, poll the receive queue
         */
        if (dd->ipath_flags & IPATH_POLL_RX_INTR)
                ipath_kreceive(dd);

        /* Handle verbs layer timeouts. */
        if (dd->verbs_layer.l_arg && verbs_timer_cb)
                verbs_timer_cb(dd->verbs_layer.l_arg);

        mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
}
/**
 * ipath_verbs_register - verbs layer registration
 * @l_add: callback when a device is added
 * @l_remove: callback when a device is removed
 * @l_piobufavail: callback for when PIO buffers become available
 * @l_rcv: callback for receiving a packet
 * @l_timer_cb: timer callback
 * @ipath_devdata: device data structure is put here
 */
int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
                         void (*l_remove)(void *arg),
                         int (*l_piobufavail) (void *arg),
                         void (*l_rcv) (void *arg, void *rhdr,
                                        void *data, u32 tlen),
                         void (*l_timer_cb) (void *arg))
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);

        verbs_add_one = l_add;
        verbs_remove_one = l_remove;
        verbs_piobufavail = l_piobufavail;
        verbs_rcv = l_rcv;
        verbs_timer_cb = l_timer_cb;

        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (!(dd->ipath_flags & IPATH_INITTED))
                        continue;

                if (dd->verbs_layer.l_arg)
                        continue;

                spin_unlock_irqrestore(&ipath_devs_lock, flags);
                dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
                spin_lock_irqsave(&ipath_devs_lock, flags);
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);
        mutex_unlock(&ipath_layer_mutex);

        ipath_verbs_registered = 1;

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_verbs_register);
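/*
 * Illustrative sketch (not part of the original file): the verbs driver
 * registers much like a layered driver, e.g.
 *
 *      ret = ipath_verbs_register(my_add, my_remove, my_piobufavail,
 *                                 my_rcv, my_timer_cb);
 *
 * (hypothetical callback names, following the prototypes above).  The
 * value returned by my_add() becomes the l_arg later handed to
 * my_rcv(l_arg, rhdr, data, tlen) and to the timer callback driven by
 * __ipath_verbs_timer().
 */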
void ipath_verbs_unregister(void)
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);
        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;

                if (dd->verbs_layer.l_arg && verbs_remove_one) {
                        spin_unlock_irqrestore(&ipath_devs_lock, flags);
                        verbs_remove_one(dd->verbs_layer.l_arg);
                        spin_lock_irqsave(&ipath_devs_lock, flags);
                        dd->verbs_layer.l_arg = NULL;
                }
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);

        verbs_add_one = NULL;
        verbs_remove_one = NULL;
        verbs_piobufavail = NULL;
        verbs_rcv = NULL;
        verbs_timer_cb = NULL;

        ipath_verbs_registered = 0;

        mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
{
        int ret;
        u32 intval = 0;

        mutex_lock(&ipath_layer_mutex);

        if (!dd->ipath_layer.l_arg) {
                ret = -EINVAL;
                goto bail;
        }

        ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);

        if (ret < 0)
                goto bail;

        *pktmax = dd->ipath_ibmaxlen;

        if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
                intval |= IPATH_LAYER_INT_IF_UP;
        if (ipath_stats.sps_lid[dd->ipath_unit])
                intval |= IPATH_LAYER_INT_LID;
        if (ipath_stats.sps_mlid[dd->ipath_unit])
                intval |= IPATH_LAYER_INT_BCAST;
        /*
         * do this on open, in case low level is already up and
         * just layered driver was reloaded, etc.
         */
        if (intval)
                layer_intr(dd->ipath_layer.l_arg, intval);

        ret = 0;
bail:
        mutex_unlock(&ipath_layer_mutex);

        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);
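/*
 * Illustrative sketch (not in the original source): a layered client
 * typically opens right after its l_add callback has run, e.g.
 *
 *      u32 pktmax;
 *
 *      if (!ipath_layer_open(dd, &pktmax))
 *              my_dev->max_payload = pktmax;   (hypothetical client state)
 *
 * and may immediately receive an IPATH_LAYER_INT_IF_UP, _LID or _BCAST
 * interrupt callback generated by the open itself.
 */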
u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
        return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * This is the EUID-64 OUI octets (top 3), then
 * skip the next 2 (which should both be zero or 0xff).
 * The returned MAC is in network order.
 * mac points to at least 6 bytes of buffer.
 * We assume that by the time the LID is set, that the GUID is as valid
 * as it's ever going to be, rather than adding yet another status bit.
 */
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
{
        u8 *guid;

        guid = (u8 *) &dd->ipath_guid;

        mac[0] = guid[0];
        mac[1] = guid[1];
        mac[2] = guid[2];
        mac[3] = guid[5];
        mac[4] = guid[6];
        mac[5] = guid[7];
        if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
                ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
                          "%x %x\n", guid[3], guid[4]);
        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
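/*
 * Worked example (added for illustration): for the GUID
 * 00:11:75:ff:ff:12:34:56 in network order, the code above produces the
 * MAC 00:11:75:12:34:56 - the 3-byte OUI followed by the low 3 bytes,
 * with the 0xff:0xff filler bytes at positions 3 and 4 skipped.
 */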
u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
        return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
{
        return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        sge->vaddr += length;
        sge->length -= length;
        sge->sge_length -= length;
        if (sge->sge_length == 0) {
                if (--ss->num_sge)
                        *sge = *ss->sg_list++;
        } else if (sge->length == 0 && sge->mr != NULL) {
                if (++sge->n >= IPATH_SEGSZ) {
                        if (++sge->m >= sge->mr->mapsz)
                                return;
                        sge->n = 0;
                }
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
        }
}
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
        data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
        data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
        return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
        data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
        data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
        return data;
}
#endif
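/*
 * Worked example (added for illustration, little-endian case): with
 * data = 0xddccbbaa, n = 2 valid bytes and off = 1, clear_upper_bytes()
 * first shifts left by (4 - 2) * 8 = 16 bits, giving 0xbbaa0000, then
 * right by (4 - 2 - 1) * 8 = 8 bits, giving 0x00bbaa00: the two valid
 * low-order bytes survive, placed one byte into the destination word,
 * and every other byte is zeroed.
 */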
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
                    u32 length)
{
        u32 extra = 0;
        u32 data = 0;
        u32 last;

        while (1) {
                u32 len = ss->sge.length;
                u32 off;

                BUG_ON(len == 0);
                if (len > length)
                        len = length;
                if (len > ss->sge.sge_length)
                        len = ss->sge.sge_length;
                /* If the source address is not aligned, try to align it. */
                off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
                if (off) {
                        u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
                                            ~(sizeof(u32) - 1));
                        u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
                        u32 y;

                        y = sizeof(u32) - off;
                        if (len > y)
                                len = y;
                        if (len + extra >= sizeof(u32)) {
                                data |= set_upper_bits(v, extra *
                                                       BITS_PER_BYTE);
                                len = sizeof(u32) - extra;
                                if (len == length) {
                                        last = data;
                                        break;
                                }
                                __raw_writel(data, piobuf);
                                piobuf++;
                                extra = 0;
                                data = 0;
                        } else {
                                /* Clear unused upper bytes */
                                data |= clear_upper_bytes(v, len, extra);
                                if (len == length) {
                                        last = data;
                                        break;
                                }
                                extra += len;
                        }
                } else if (extra) {
                        /* Source address is aligned. */
                        u32 *addr = (u32 *) ss->sge.vaddr;
                        int shift = extra * BITS_PER_BYTE;
                        int ushift = 32 - shift;
                        u32 l = len;

                        while (l >= sizeof(u32)) {
                                u32 v = *addr;

                                data |= set_upper_bits(v, shift);
                                __raw_writel(data, piobuf);
                                data = get_upper_bits(v, ushift);
                                piobuf++;
                                addr++;
                                l -= sizeof(u32);
                        }
                        /*
                         * We still have 'extra' number of bytes leftover.
                         */
                        if (l) {
                                u32 v = *addr;

                                if (l + extra >= sizeof(u32)) {
                                        data |= set_upper_bits(v, shift);
                                        len -= l + extra - sizeof(u32);
                                        if (len == length) {
                                                last = data;
                                                break;
                                        }
                                        __raw_writel(data, piobuf);
                                        piobuf++;
                                        extra = 0;
                                        data = 0;
                                } else {
                                        /* Clear unused upper bytes */
                                        data |= clear_upper_bytes(v, l,
                                                                  extra);
                                        if (len == length) {
                                                last = data;
                                                break;
                                        }
                                        extra += l;
                                }
                        } else if (len == length) {
                                last = data;
                                break;
                        }
                } else if (len == length) {
                        u32 w;

                        /*
                         * Need to round up for the last dword in the
                         * packet.
                         */
                        w = (len + 3) >> 2;
                        __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
                        piobuf += w - 1;
                        last = ((u32 *) ss->sge.vaddr)[w - 1];
                        break;
                } else {
                        u32 w = len >> 2;

                        __iowrite32_copy(piobuf, ss->sge.vaddr, w);
                        piobuf += w;

                        extra = len & (sizeof(u32) - 1);
                        if (extra) {
                                u32 v = ((u32 *) ss->sge.vaddr)[w];

                                /* Clear unused upper bytes */
                                data = clear_upper_bytes(v, extra, 0);
                        }
                }
                update_sge(ss, len);
                length -= len;
        }
        /* Update address before sending packet. */
        update_sge(ss, length);
        /* must flush early everything before trigger word */
        ipath_flush_wc();
        __raw_writel(last, piobuf);
        /* be sure trigger word is written */
        ipath_flush_wc();
}
/**
 * ipath_verbs_send - send a packet from the verbs layer
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 *
 * This is like ipath_sma_send_pkt() in that we need to be able to send
 * packets after the chip is initialized (MADs) but also like
 * ipath_layer_send_hdr() since it's used by the verbs layer.
 */
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
                     u32 *hdr, u32 len, struct ipath_sge_state *ss)
{
        u32 __iomem *piobuf;
        u32 plen;
        int ret;

        /* +1 is for the qword padding of pbc */
        plen = hdrwords + ((len + 3) >> 2) + 1;
        if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
                ipath_dbg("packet len 0x%x too long, failing\n", plen);
                ret = -EINVAL;
                goto bail;
        }

        /* Get a PIO buffer to use. */
        piobuf = ipath_getpiobuf(dd, NULL);
        if (unlikely(piobuf == NULL)) {
                ret = -EBUSY;
                goto bail;
        }

        /*
         * Write len to control qword, no flags.
         * We have to flush after the PBC for correctness on some cpus
         * or WC buffer can be written out of order.
         */
        writeq(plen, piobuf);
        ipath_flush_wc();
        piobuf += 2;
        if (len == 0) {
                /*
                 * If there is just the header portion, must flush before
                 * writing last word of header for correctness, and after
                 * the last header word (trigger word).
                 */
                __iowrite32_copy(piobuf, hdr, hdrwords - 1);
                ipath_flush_wc();
                __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
                ipath_flush_wc();
                ret = 0;
                goto bail;
        }

        __iowrite32_copy(piobuf, hdr, hdrwords);
        piobuf += hdrwords;

        /* The common case is aligned and contained in one segment. */
        if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
                   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
                u32 w;
                u32 *addr = (u32 *) ss->sge.vaddr;

                /* Update address before sending packet. */
                update_sge(ss, len);
                /* Need to round up for the last dword in the packet. */
                w = (len + 3) >> 2;
                __iowrite32_copy(piobuf, addr, w - 1);
                /* must flush early everything before trigger word */
                ipath_flush_wc();
                __raw_writel(addr[w - 1], piobuf + w - 1);
                /* be sure trigger word is written */
                ipath_flush_wc();
                ret = 0;
                goto bail;
        }
        copy_io(piobuf, ss, len);
        ret = 0;

bail:
        return ret;
}

EXPORT_SYMBOL_GPL(ipath_verbs_send);
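/*
 * Worked example (added for illustration): a 13-byte payload with a
 * 10-word header gives plen = 10 + ((13 + 3) >> 2) + 1 = 15 words - the
 * header, the payload rounded up to 4 dwords, and one word for the PBC
 * qword padding.  plen << 2 (60 bytes) is then checked against
 * ipath_ibmaxlen before a PIO buffer is claimed.
 */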
int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
                                  u64 *rwords, u64 *spkts, u64 *rpkts,
                                  u64 *xmit_wait)
{
        int ret;

        if (!(dd->ipath_flags & IPATH_INITTED)) {
                /* no hardware, freeze, etc. */
                ipath_dbg("unit %u not usable\n", dd->ipath_unit);
                ret = -EINVAL;
                goto bail;
        }
        *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
        *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
        *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
        *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
        *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

        ret = 0;

bail:
        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
/**
 * ipath_layer_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_layer_get_counters(struct ipath_devdata *dd,
                             struct ipath_layer_counters *cntrs)
{
        int ret;

        if (!(dd->ipath_flags & IPATH_INITTED)) {
                /* no hardware, freeze, etc. */
                ipath_dbg("unit %u not usable\n", dd->ipath_unit);
                ret = -EINVAL;
                goto bail;
        }
        cntrs->symbol_error_counter =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
        cntrs->link_error_recovery_counter =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
        cntrs->link_downed_counter =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
        cntrs->port_rcv_errors =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
        cntrs->port_rcv_remphys_errors =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
        cntrs->port_xmit_discards =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
        cntrs->port_xmit_data =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
        cntrs->port_rcv_data =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
        cntrs->port_xmit_packets =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
        cntrs->port_rcv_packets =
                ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);

        ret = 0;

bail:
        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
int ipath_layer_want_buffer(struct ipath_devdata *dd)
{
        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
        int ret = 0;
        u32 __iomem *piobuf;
        u32 plen, *uhdr;
        size_t count;
        __be16 vlsllnh;

        if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
                ipath_dbg("send while not open\n");
                ret = -EINVAL;
        } else
                if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
                    dd->ipath_lid == 0) {
                        /*
                         * lid check is for when sma hasn't yet configured
                         */
                        ret = -ENETDOWN;
                        ipath_cdbg(VERBOSE, "send while not ready, "
                                   "mylid=%u, flags=0x%x\n",
                                   dd->ipath_lid, dd->ipath_flags);
                }

        vlsllnh = *((__be16 *) hdr);
        if (vlsllnh != htons(IPS_LRH_BTH)) {
                ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
                          "not sending\n", be16_to_cpu(vlsllnh),
                          IPS_LRH_BTH);
                ret = -EINVAL;
        }
        if (ret)
                goto done;

        /* Get a PIO buffer to use. */
        piobuf = ipath_getpiobuf(dd, NULL);
        if (piobuf == NULL) {
                ret = -EBUSY;
                goto done;
        }

        plen = (sizeof(*hdr) >> 2); /* actual length */
        ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

        writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
        ipath_flush_wc();
        piobuf += 2;
        uhdr = (u32 *)hdr;
        count = plen-1; /* amount we can copy before trigger word */
        __iowrite32_copy(piobuf, uhdr, count);
        ipath_flush_wc();
        __raw_writel(uhdr[count], piobuf + count);
        ipath_flush_wc(); /* ensure it's sent, now */

        ipath_stats.sps_ether_spkts++; /* ether packet sent */

done:
        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
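/*
 * Illustrative note (added): the PIO write sequence above is
 *
 *      writeq(plen + 1, piobuf);        - PBC word count
 *      ipath_flush_wc();                - PBC must land first
 *      __iowrite32_copy(...);           - all but the last header word
 *      ipath_flush_wc();                - everything before the trigger
 *      __raw_writel(uhdr[count], ...);  - trigger word starts the send
 *      ipath_flush_wc();
 *
 * The same flush-before-trigger pattern appears in ipath_verbs_send()
 * and copy_io(); without the flushes the write-combining buffer could
 * reorder the stores.
 */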
int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
int ipath_layer_enable_timer(struct ipath_devdata *dd)
{
        /*
         * HT-400 has a design flaw where the chip and kernel idea
         * of the tail register don't always agree, and therefore we won't
         * get an interrupt on the next packet received.
         * If the board supports per packet receive interrupts, use it.
         * Otherwise, the timer function periodically checks for packets
         * to cover this case.
         * Either way, the timer is needed for verbs layer related
         * processing.
         */
        if (dd->ipath_flags & IPATH_GPIO_INTR) {
                ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
                                 0x2074076542310ULL);
                /* Enable GPIO bit 2 interrupt */
                ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
                                 (u64) (1 << 2));
        }

        init_timer(&dd->verbs_layer.l_timer);
        dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
        dd->verbs_layer.l_timer.data = (unsigned long)dd;
        dd->verbs_layer.l_timer.expires = jiffies + 1;
        add_timer(&dd->verbs_layer.l_timer);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
int ipath_layer_disable_timer(struct ipath_devdata *dd)
{
        /* Disable GPIO bit 2 interrupt */
        if (dd->ipath_flags & IPATH_GPIO_INTR)
                ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);

        del_timer_sync(&dd->verbs_layer.l_timer);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
/**
 * ipath_layer_set_verbs_flags - set the verbs layer flags
 * @dd: the infinipath device
 * @flags: the flags to set
 */
int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
{
        struct ipath_devdata *ss;
        unsigned long lflags;

        spin_lock_irqsave(&ipath_devs_lock, lflags);

        list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
                if (!(ss->ipath_flags & IPATH_INITTED))
                        continue;
                if ((flags & IPATH_VERBS_KERNEL_SMA) &&
                    !(*ss->ipath_statusp & IPATH_STATUS_SMA))
                        *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
                else
                        *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
        }

        spin_unlock_irqrestore(&ipath_devs_lock, lflags);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
/**
 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
{
        return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
/**
 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
{
        unsigned ret;

        if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
                ret = 0;
        else
                ret = dd->ipath_pd[0]->port_pkeys[index];

        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
/**
 * ipath_layer_get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
        struct ipath_portdata *pd = dd->ipath_pd[0];

        memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
        int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
                if (dd->ipath_pkeys[i] != key)
                        continue;
                if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
                        dd->ipath_pkeys[i] = 0;
                        ret = 1;
                        goto bail;
                }
                ret = 0;
                goto bail;
        }

        ret = -ENOENT;

bail:
        return ret;
}
/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
        int i;
        u16 lkey = key & 0x7FFF;
        int any = 0;
        int ret;

        if (lkey == 0x7FFF) {
                ret = 0;
                goto bail;
        }

        /* Look for an empty slot or a matching PKEY. */
        for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
                if (!dd->ipath_pkeys[i]) {
                        any++;
                        continue;
                }
                /* If it matches exactly, try to increment the ref count */
                if (dd->ipath_pkeys[i] == key) {
                        if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
                                ret = 0;
                                goto bail;
                        }
                        /* Lost the race. Look for an empty slot below. */
                        atomic_dec(&dd->ipath_pkeyrefs[i]);
                        any++;
                }
                /*
                 * It makes no sense to have both the limited and unlimited
                 * PKEY set at the same time since the unlimited one will
                 * disable the limited one.
                 */
                if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
                        ret = -EEXIST;
                        goto bail;
                }
        }
        if (!any) {
                ret = -EBUSY;
                goto bail;
        }
        for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
                if (!dd->ipath_pkeys[i] &&
                    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
                        /* for ipathstats, etc. */
                        ipath_stats.sps_pkeys[i] = lkey;
                        dd->ipath_pkeys[i] = key;
                        ret = 1;
                        goto bail;
                }
        }
        ret = -EBUSY;

bail:
        return ret;
}
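/*
 * Illustrative note (added): adding key 0x8001 twice leaves one table
 * slot holding 0x8001 with a reference count of 2; add_pkey() returns 1
 * the first time (hardware update needed) and 0 the second (no change).
 * A later rm_pkey() of the first reference returns 0, and removing the
 * second returns 1, at which point the slot is cleared.
 */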
/**
 * ipath_layer_set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
        struct ipath_portdata *pd;
        int i;
        int changed = 0;

        pd = dd->ipath_pd[0];

        for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
                u16 key = pkeys[i];
                u16 okey = pd->port_pkeys[i];

                if (key == okey)
                        continue;
                /*
                 * The value of this PKEY table entry is changing.
                 * Remove the old entry in the hardware's array of PKEYs.
                 */
                if (okey & 0x7FFF)
                        changed |= rm_pkey(dd, okey);
                if (key & 0x7FFF) {
                        int ret = add_pkey(dd, key);

                        if (ret < 0)
                                key = 0;
                        else
                                changed |= ret;
                }
                pd->port_pkeys[i] = key;
        }
        if (changed) {
                u64 pkey;

                pkey = (u64) dd->ipath_pkeys[0] |
                        ((u64) dd->ipath_pkeys[1] << 16) |
                        ((u64) dd->ipath_pkeys[2] << 32) |
                        ((u64) dd->ipath_pkeys[3] << 48);
                ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
                           (unsigned long long) pkey);
                ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
                                 pkey);
        }
        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
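/*
 * Worked example (added for illustration): with the hardware table
 * holding { 0xFFFF, 0x8001, 0, 0 }, the 64-bit value written to
 * kr_partitionkey is 0xFFFF | (0x8001 << 16) = 0x000000008001FFFF:
 * each 16-bit entry occupies its own quarter of the register.
 */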
/**
 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
{
        return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
/**
 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
                                         int sleep)
{
        if (sleep)
                dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
        else
                dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
                         dd->ipath_ibcctrl);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
{
        return (dd->ipath_ibcctrl >>
                INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
                INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
/**
 * ipath_layer_set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
        unsigned v;

        v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
                INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
        if (v != n) {
                dd->ipath_ibcctrl &=
                        ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
                          INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
                dd->ipath_ibcctrl |=
                        (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
                                 dd->ipath_ibcctrl);
        }

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
{
        return (dd->ipath_ibcctrl >>
                INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
                INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
/**
 * ipath_layer_set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
        unsigned v;

        v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
                INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
        if (v != n) {
                dd->ipath_ibcctrl &=
                        ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
                          INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
                dd->ipath_ibcctrl |=
                        (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
                                 dd->ipath_ibcctrl);
        }

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
                              size_t namelen)
{
        return dd->ipath_f_get_boardname(dd, name, namelen);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
{
        return dd->ipath_rcvhdrentsize;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);