/*
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */

#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"

/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);

int ipath_verbs_registered;

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);
static void (*verbs_timer_cb)(void *);

int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_intr)
		ret = layer_intr(dd->ipath_layer.l_arg, arg);

	return ret;
}

int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret;

	mutex_lock(&ipath_layer_mutex);

	ret = __ipath_layer_intr(dd, arg);

	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
		      struct sk_buff *skb)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv)
		ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

	return ret;
}

int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv_lid)
		ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

	return ret;
}

int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_piobufavail)
		ret = verbs_piobufavail(dd->verbs_layer.l_arg);

	return ret;
}

int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
		      u32 tlen)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_rcv) {
		verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
		ret = 0;
	}

	return ret;
}

int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);

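/*
 * Example (not part of the original file): a minimal sketch of how a caller
 * such as a subnet management agent might walk the link up using
 * ipath_layer_set_linkstate().  The helper name is hypothetical.
 */
static int example_bring_link_active(struct ipath_devdata *dd)
{
	int ret;

	/* The IB link must pass through INIT and ARMED before ACTIVE. */
	ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKINIT);
	if (ret < 0)
		return ret;
	ret = ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM);
	if (ret < 0)
		return ret;
	return ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
}
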
/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);	/* in words */
		/*
		 * for ICRC, which we only send in diag test pkt mode, and
		 * we don't need to worry about that for mtu
		 */
		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);

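/*
 * Example (not part of the original file): ipath_layer_set_mtu() only
 * accepts the discrete IB payload sizes, so a caller handed an arbitrary
 * request would round it down first.  A hypothetical sketch:
 */
static int example_set_nearest_ib_mtu(struct ipath_devdata *dd, u16 want)
{
	u16 mtu = 256;	/* smallest valid IB MTU is the floor */

	/* pick the largest valid IB MTU that does not exceed the request */
	while (mtu < 4096 && (u16) (mtu << 1) <= want)
		mtu <<= 1;

	return ipath_layer_set_mtu(dd, mtu);
}
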
int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
	ipath_stats.sps_lid[dd->ipath_unit] = arg;

	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_intr)
		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_set_sps_lid);

int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
{
	/* XXX - need to inform anyone who cares this just happened. */
	dd->ipath_guid = guid;

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_guid);

__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
{
	return dd->ipath_guid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_guid);

u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
{
	return dd->ipath_nguid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);

int ipath_layer_query_device(struct ipath_devdata *dd, u32 *vendor,
			     u32 *boardrev, u32 *majrev, u32 *minrev)
{
	*vendor = dd->ipath_vendorid;
	*boardrev = dd->ipath_boardrev;
	*majrev = dd->ipath_majrev;
	*minrev = dd->ipath_minrev;

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_query_device);

u32 ipath_layer_get_flags(struct ipath_devdata *dd)
{
	return dd->ipath_flags;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_flags);

struct device *ipath_layer_get_device(struct ipath_devdata *dd)
{
	return &dd->pcidev->dev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_device);

u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
{
	return dd->ipath_deviceid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);

u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
{
	return dd->ipath_lastibcstat;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);

u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
{
	return dd->ipath_ibmtu;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);

void ipath_layer_add(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (layer_add_one)
		dd->ipath_layer.l_arg =
			layer_add_one(dd->ipath_unit, dd);

	if (verbs_add_one)
		dd->verbs_layer.l_arg =
			verbs_add_one(dd->ipath_unit, dd);

	mutex_unlock(&ipath_layer_mutex);
}

void ipath_layer_del(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_remove_one) {
		layer_remove_one(dd->ipath_layer.l_arg);
		dd->ipath_layer.l_arg = NULL;
	}

	if (dd->verbs_layer.l_arg && verbs_remove_one) {
		verbs_remove_one(dd->verbs_layer.l_arg);
		dd->verbs_layer.l_arg = NULL;
	}

	mutex_unlock(&ipath_layer_mutex);
}

int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->ipath_layer.l_arg)
			continue;

		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);

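/*
 * Example (not part of the original file): the shape of a registration call
 * from a layered driver such as the ipath ethernet driver.  The callback
 * names and the opcode value are hypothetical; only ipath_layer_register()
 * and its callback signatures above are real.
 */
static void *example_add_one(int unit, struct ipath_devdata *dd)
{
	/* allocate and return per-unit private state here */
	return dd;
}

static void example_remove_one(void *arg)
{
	/* tear down whatever example_add_one() allocated */
}

static int example_intr(void *arg, u32 what)
{
	/* react to IPATH_LAYER_INT_IF_UP, _LID and _BCAST bits */
	return 0;
}

static int example_rcv(void *arg, void *hdr, struct sk_buff *skb)
{
	/* hand the skb up to the network stack here */
	return 0;
}

static int example_rcv_lid(void *arg, void *hdr)
{
	return 0;
}

static int example_register(void)
{
	return ipath_layer_register(example_add_one, example_remove_one,
				    example_intr, example_rcv,
				    0x70 /* hypothetical opcode */,
				    example_rcv_lid);
}
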
void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);

static void __ipath_verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
	if (dd->verbs_layer.l_arg && verbs_timer_cb)
		verbs_timer_cb(dd->verbs_layer.l_arg);

	mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
}

/**
 * ipath_verbs_register - verbs layer registration
 * @l_piobufavail: callback for when PIO buffers become available
 * @l_rcv: callback for receiving a packet
 * @l_timer_cb: timer callback
 * @ipath_devdata: device data structure is put here
 */
int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *arg),
			 int (*l_piobufavail)(void *arg),
			 void (*l_rcv)(void *arg, void *rhdr,
				       void *data, u32 tlen),
			 void (*l_timer_cb)(void *arg))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	verbs_add_one = l_add;
	verbs_remove_one = l_remove;
	verbs_piobufavail = l_piobufavail;
	verbs_rcv = l_rcv;
	verbs_timer_cb = l_timer_cb;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->verbs_layer.l_arg)
			continue;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	ipath_verbs_registered = 1;

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_verbs_register);

void ipath_verbs_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		*dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;

		if (dd->verbs_layer.l_arg && verbs_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			verbs_remove_one(dd->verbs_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->verbs_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	verbs_add_one = NULL;
	verbs_remove_one = NULL;
	verbs_piobufavail = NULL;
	verbs_rcv = NULL;
	verbs_timer_cb = NULL;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_verbs_unregister);

int ipath_layer_open(struct ipath_devdata *dd, u32 *pktmax)
{
	int ret;
	u32 intval = 0;

	mutex_lock(&ipath_layer_mutex);

	if (!dd->ipath_layer.l_arg) {
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);

	if (ret < 0)
		goto bail;

	*pktmax = dd->ipath_ibmaxlen;

	if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
		intval |= IPATH_LAYER_INT_IF_UP;
	if (ipath_stats.sps_lid[dd->ipath_unit])
		intval |= IPATH_LAYER_INT_LID;
	if (ipath_stats.sps_mlid[dd->ipath_unit])
		intval |= IPATH_LAYER_INT_BCAST;
	/*
	 * do this on open, in case low level is already up and
	 * just layered driver was reloaded, etc.
	 */
	if (intval)
		layer_intr(dd->ipath_layer.l_arg, intval);

bail:
	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);

u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
	return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);

/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * This is the EUID-64 OUI octets (top 3), then
 * skip the next 2 (which should both be zero or 0xff).
 * The returned MAC is in network order.
 * mac points to at least 6 bytes of buffer.
 * We assume that by the time the LID is set, that the GUID is as valid
 * as it's ever going to be, rather than adding yet another status bit.
 */
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *mac)
{
	u8 *guid;

	guid = (u8 *) &dd->ipath_guid;

	mac[0] = guid[0];
	mac[1] = guid[1];
	mac[2] = guid[2];
	mac[3] = guid[5];
	mac[4] = guid[6];
	mac[5] = guid[7];
	if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
		ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
			  "%x %x\n", guid[3], guid[4]);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);

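/*
 * Example (not part of the original file): the GUID-to-MAC mapping described
 * above, written out for a plain buffer.  A GUID of
 * 02:00:00:ff:ff:00:12:34 (bytes 3 and 4 being the 0xff/0xff filler) yields
 * the MAC 02:00:00:00:12:34.
 */
static void example_guid_to_mac(const u8 guid[8], u8 mac[6])
{
	mac[0] = guid[0];	/* OUI */
	mac[1] = guid[1];
	mac[2] = guid[2];
	mac[3] = guid[5];	/* skip guid[3] and guid[4] */
	mac[4] = guid[6];
	mac[5] = guid[7];
}
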
u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
	return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);

u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);

static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* must flush early everything before trigger word */
	ipath_flush_wc();
	__raw_writel(last, piobuf);
	/* be sure trigger word is written */
	ipath_flush_wc();
	update_sge(ss, length);
}

/**
 * ipath_verbs_send - send a packet from the verbs layer
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 *
 * This is like ipath_sma_send_pkt() in that we need to be able to send
 * packets after the chip is initialized (MADs) but also like
 * ipath_layer_send_hdr() since it's used by the verbs layer.
 */
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
{
	u32 __iomem *piobuf;
	u32 plen;
	int ret;

	/* +1 is for the qword padding of pbc */
	plen = hdrwords + ((len + 3) >> 2) + 1;
	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
		ipath_dbg("packet len 0x%x too long, failing\n", plen);
		ret = -EINVAL;
		goto bail;
	}

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Write len to control qword, no flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(plen, piobuf);
	ipath_flush_wc();
	piobuf += 2;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
		ipath_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}

	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 w;

		/* Need to round up for the last dword in the packet. */
		w = (len + 3) >> 2;
		__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
			     piobuf + w - 1);
		/* be sure trigger word is written */
		ipath_flush_wc();
		update_sge(ss, len);
		ret = 0;
		goto bail;
	}

	copy_io(piobuf, ss, len);
	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_verbs_send);

int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
				  u64 *rwords, u64 *spkts, u64 *rpkts,
				  u64 *xmit_wait)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);

/**
 * ipath_layer_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_layer_get_counters(struct ipath_devdata *dd,
			     struct ipath_layer_counters *cntrs)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
	cntrs->link_error_recovery_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
	cntrs->link_downed_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
	cntrs->port_rcv_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
	cntrs->port_rcv_remphys_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
	cntrs->port_xmit_discards =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
	cntrs->port_xmit_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	cntrs->port_rcv_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	cntrs->port_xmit_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	cntrs->port_rcv_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_counters);

int ipath_layer_want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);

int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
	int ret = 0;
	u32 __iomem *piobuf;
	u32 plen, *uhdr;
	size_t count;
	__be16 vlsllnh;

	if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
		ipath_dbg("send while not open\n");
		ret = -EINVAL;
		goto done;
	}
	if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
	    dd->ipath_lid == 0) {
		/*
		 * lid check is for when sma hasn't yet configured
		 */
		ret = -ENETDOWN;
		ipath_cdbg(VERBOSE, "send while not ready, "
			   "mylid=%u, flags=0x%x\n",
			   dd->ipath_lid, dd->ipath_flags);
		goto done;
	}

	vlsllnh = *((__be16 *) hdr);
	if (vlsllnh != htons(IPS_LRH_BTH)) {
		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
			  "not sending\n", be16_to_cpu(vlsllnh),
			  IPS_LRH_BTH);
		ret = -EINVAL;
		goto done;
	}

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (piobuf == NULL) {
		ret = -EBUSY;
		goto done;
	}

	plen = (sizeof(*hdr) >> 2);	/* actual length */
	ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

	writeq(plen+1, piobuf);	/* len (+1 for pad) to pbc, no flags */
	ipath_flush_wc();
	piobuf += 2;
	uhdr = (u32 *)hdr;
	count = plen-1;	/* amount we can copy before trigger word */
	__iowrite32_copy(piobuf, uhdr, count);
	ipath_flush_wc();
	__raw_writel(uhdr[count], piobuf + count);
	ipath_flush_wc();	/* ensure it's sent, now */

	ipath_stats.sps_ether_spkts++;	/* ether packet sent */

done:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);

int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);

int ipath_layer_enable_timer(struct ipath_devdata *dd)
{
	/*
	 * HT-400 has a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * timeouts.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 (u64) (1 << 2));
	}

	init_timer(&dd->verbs_layer.l_timer);
	dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
	dd->verbs_layer.l_timer.data = (unsigned long)dd;
	dd->verbs_layer.l_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);

int ipath_layer_disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);

	del_timer_sync(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);

/**
 * ipath_layer_set_verbs_flags - set the verbs layer flags
 * @dd: the infinipath device
 * @flags: the flags to set
 */
int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
{
	struct ipath_devdata *ss;
	unsigned long lflags;

	spin_lock_irqsave(&ipath_devs_lock, lflags);

	list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
		if (!(ss->ipath_flags & IPATH_INITTED))
			continue;
		if ((flags & IPATH_VERBS_KERNEL_SMA) &&
		    !(*ss->ipath_statusp & IPATH_STATUS_SMA))
			*ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
		else
			*ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, lflags);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);

/**
 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);

/**
 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	unsigned ret;

	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		ret = 0;
	else
		ret = dd->ipath_pd[0]->port_pkeys[index];

	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);

/**
 * ipath_layer_get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		ret = 0;
		goto bail;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * ipath_layer_set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;

	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);

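/*
 * Example (not part of the original file): add_pkey() masks with 0x7FFF
 * because bit 15 of a PKEY is only the membership type (full vs. limited);
 * two keys that differ only in that bit name the same partition, and the
 * full-member key disables the limited one.  A hypothetical check in the
 * same spirit:
 */
static int example_same_partition(u16 a, u16 b)
{
	return (a & 0x7FFF) == (b & 0x7FFF);
}
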
/**
 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);

/**
 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
					 int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);

int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);

/**
 * ipath_layer_set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);

int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);

/**
 * ipath_layer_set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);

int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
			      size_t namelen)
{
	return dd->ipath_f_get_boardname(dd, name, namelen);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);

u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
{
	return dd->ipath_rcvhdrentsize;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);