/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */

#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"

/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

static int ipath_verbs_registered;

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
static void *(*verbs_add_one)(int, struct ipath_devdata *);
static void (*verbs_remove_one)(void *);
static void (*verbs_timer_cb)(void *);

int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_intr)
		ret = layer_intr(dd->ipath_layer.l_arg, arg);

	return ret;
}

int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret;

	mutex_lock(&ipath_layer_mutex);

	ret = __ipath_layer_intr(dd, arg);

	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
		      struct sk_buff *skb)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv)
		ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

	return ret;
}

int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv_lid)
		ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

	return ret;
}

int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_piobufavail)
		ret = verbs_piobufavail(dd->verbs_layer.l_arg);

	return ret;
}

int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
		      u32 tlen)
{
	int ret = -ENODEV;

	if (dd->verbs_layer.l_arg && verbs_rcv) {
		verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
		ret = 0;
	}

	return ret;
}

int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);

/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
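/*
 * Example (illustrative only): a layered driver that wants the largest
 * standard IB MTU would call
 *
 *	ret = ipath_layer_set_mtu(dd, 4096);
 *
 * Any value other than 256, 512, 1024, 2048 or 4096 is rejected with
 * -EINVAL by the sanity check below.
 */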
int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);

		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);	/* in words */
		/*
		 * for ICRC, which we only send in diag test pkt mode, and
		 * we don't need to worry about that for mtu
		 */
		piosize += 1;

		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);

int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
	ipath_stats.sps_lid[dd->ipath_unit] = arg;
	dd->ipath_lid = arg;
	dd->ipath_lmc = lmc;

	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_intr)
		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_set_sps_lid);

int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
{
	/* XXX - need to inform anyone who cares this just happened. */
	dd->ipath_guid = guid;
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_guid);

__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
{
	return dd->ipath_guid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_guid);

u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
{
	return dd->ipath_nguid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);

int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
			     u32 * boardrev, u32 * majrev, u32 * minrev)
{
	*vendor = dd->ipath_vendorid;
	*boardrev = dd->ipath_boardrev;
	*majrev = dd->ipath_majrev;
	*minrev = dd->ipath_minrev;

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_query_device);

u32 ipath_layer_get_flags(struct ipath_devdata *dd)
{
	return dd->ipath_flags;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_flags);

struct device *ipath_layer_get_device(struct ipath_devdata *dd)
{
	return &dd->pcidev->dev;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_device);

u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
{
	return dd->ipath_deviceid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);

u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
{
	return dd->ipath_lastibcstat;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);

u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
{
	return dd->ipath_ibmtu;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);

void ipath_layer_add(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (layer_add_one)
		dd->ipath_layer.l_arg =
			layer_add_one(dd->ipath_unit, dd);

	if (verbs_add_one)
		dd->verbs_layer.l_arg =
			verbs_add_one(dd->ipath_unit, dd);

	mutex_unlock(&ipath_layer_mutex);
}

void ipath_layer_del(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_remove_one) {
		layer_remove_one(dd->ipath_layer.l_arg);
		dd->ipath_layer.l_arg = NULL;
	}

	if (dd->verbs_layer.l_arg && verbs_remove_one) {
		verbs_remove_one(dd->verbs_layer.l_arg);
		dd->verbs_layer.l_arg = NULL;
	}

	mutex_unlock(&ipath_layer_mutex);
}

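/*
 * Usage sketch for the registration below (the eth_* names and
 * ETH_OPCODE are hypothetical; the real caller is a layered driver
 * such as the layered ethernet driver):
 *
 *	ret = ipath_layer_register(eth_add, eth_remove, eth_intr,
 *				   eth_rcv, ETH_OPCODE, eth_rcv_lid);
 *
 * Registration walks the device list and invokes eth_add() immediately
 * for every device that is already initialized.
 */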
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->ipath_layer.l_arg)
			continue;

		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);

void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);

static void __ipath_verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
	if (dd->verbs_layer.l_arg && verbs_timer_cb)
		verbs_timer_cb(dd->verbs_layer.l_arg);

	mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
}

/**
 * ipath_verbs_register - verbs layer registration
 * @l_piobufavail: callback for when PIO buffers become available
 * @l_rcv: callback for receiving a packet
 * @l_timer_cb: timer callback
 * @ipath_devdata: device data structure is put here
 */
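/*
 * Example (illustrative sketch only; the my_* callbacks are
 * hypothetical):
 *
 *	ret = ipath_verbs_register(my_add, my_remove, my_piobufavail,
 *				   my_rcv, my_timer_cb);
 *
 * As with ipath_layer_register(), devices that are already INITTED get
 * an immediate my_add() callback.
 */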
int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *arg),
			 int (*l_piobufavail) (void *arg),
			 void (*l_rcv) (void *arg, void *rhdr,
					void *data, u32 tlen),
			 void (*l_timer_cb) (void *arg))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	verbs_add_one = l_add;
	verbs_remove_one = l_remove;
	verbs_piobufavail = l_piobufavail;
	verbs_rcv = l_rcv;
	verbs_timer_cb = l_timer_cb;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->verbs_layer.l_arg)
			continue;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	ipath_verbs_registered = 1;

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_verbs_register);

void ipath_verbs_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		*dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;

		if (dd->verbs_layer.l_arg && verbs_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			verbs_remove_one(dd->verbs_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->verbs_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	verbs_add_one = NULL;
	verbs_remove_one = NULL;
	verbs_piobufavail = NULL;
	verbs_rcv = NULL;
	verbs_timer_cb = NULL;

	ipath_verbs_registered = 0;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_verbs_unregister);

int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
{
	int ret;
	u32 intval = 0;

	mutex_lock(&ipath_layer_mutex);

	if (!dd->ipath_layer.l_arg) {
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);

	if (ret < 0)
		goto bail;

	*pktmax = dd->ipath_ibmaxlen;

	if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
		intval |= IPATH_LAYER_INT_IF_UP;
	if (ipath_stats.sps_lid[dd->ipath_unit])
		intval |= IPATH_LAYER_INT_LID;
	if (ipath_stats.sps_mlid[dd->ipath_unit])
		intval |= IPATH_LAYER_INT_BCAST;
	/*
	 * do this on open, in case low level is already up and
	 * just layered driver was reloaded, etc.
	 */
	if (intval)
		layer_intr(dd->ipath_layer.l_arg, intval);

	ret = 0;
bail:
	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);

u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
	return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);

/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * This is the EUI-64 OUI octets (top 3), then
 * skip the next 2 (which should both be zero or 0xff).
 * The returned MAC is in network order.
 * mac points to at least 6 bytes of buffer.
 * We assume that by the time the LID is set, that the GUID is as valid
 * as it's ever going to be, rather than adding yet another status bit.
 */
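/*
 * Worked example (illustrative only): a GUID of
 * 02:34:56:ff:ff:78:9a:bc in network order yields the MAC
 * 02:34:56:78:9a:bc; the two filler octets (bytes 3 and 4, either
 * 0xff 0xff or 0x00 0x00) are simply dropped.
 */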
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
{
	u8 *guid;

	guid = (u8 *) &dd->ipath_guid;

	mac[0] = guid[0];
	mac[1] = guid[1];
	mac[2] = guid[2];
	mac[3] = guid[5];
	mac[4] = guid[6];
	mac[5] = guid[7];
	if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
		ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
			  "%x %x\n", guid[3], guid[4]);
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);

u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
	return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);

u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);

static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
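/*
 * Worked example (little-endian case, illustrative only):
 * clear_upper_bytes(0xaabbccdd, 2, 0) keeps the two low-order bytes
 * and clears the rest, giving 0x0000ccdd; with off = 1 the kept bytes
 * land one byte position higher, giving 0x00ccdd00.  copy_io() below
 * uses these helpers to merge partial words from unaligned source
 * buffers.
 */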
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		BUG_ON(len == 0);
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	/* must flush early everything before trigger word */
	ipath_flush_wc();
	__raw_writel(last, piobuf);
	/* be sure trigger word is written */
	ipath_flush_wc();
}

/**
 * ipath_verbs_send - send a packet from the verbs layer
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 *
 * This is like ipath_sma_send_pkt() in that we need to be able to send
 * packets after the chip is initialized (MADs) but also like
 * ipath_layer_send_hdr() since it's used by the verbs layer.
 */
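/*
 * Worked example (illustrative only): for hdrwords = 8 and a 13-byte
 * payload, plen = 8 + ((13 + 3) >> 2) + 1 = 13 dwords, and the send is
 * rejected if plen << 2 = 52 bytes exceeds dd->ipath_ibmaxlen.
 */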
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
{
	u32 __iomem *piobuf;
	u32 plen;
	int ret;

	/* +1 is for the qword padding of pbc */
	plen = hdrwords + ((len + 3) >> 2) + 1;
	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
		ipath_dbg("packet len 0x%x too long, failing\n", plen);
		ret = -EINVAL;
		goto bail;
	}

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Write len to control qword, no flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(plen, piobuf);
	ipath_flush_wc();
	piobuf += 2;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
		ipath_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}

	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 w;
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		/* Need to round up for the last dword in the packet. */
		w = (len + 3) >> 2;
		__iowrite32_copy(piobuf, addr, w - 1);
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(addr[w - 1], piobuf + w - 1);
		/* be sure trigger word is written */
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}
	copy_io(piobuf, ss, len);
	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_verbs_send);

int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
				  u64 *rwords, u64 *spkts, u64 *rpkts,
				  u64 *xmit_wait)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);

/**
 * ipath_layer_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
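/*
 * Usage sketch (illustrative only):
 *
 *	struct ipath_layer_counters c;
 *
 *	if (!ipath_layer_get_counters(dd, &c))
 *		ipath_dbg("rcv errors %llu\n",
 *			  (unsigned long long) c.port_rcv_errors);
 */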
int ipath_layer_get_counters(struct ipath_devdata *dd,
			     struct ipath_layer_counters *cntrs)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
	cntrs->link_error_recovery_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
	cntrs->link_downed_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
	cntrs->port_rcv_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
	cntrs->port_rcv_remphys_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
	cntrs->port_xmit_discards =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
	cntrs->port_xmit_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	cntrs->port_rcv_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	cntrs->port_xmit_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	cntrs->port_rcv_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);

	ret = 0;

bail:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_counters);

int ipath_layer_want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);

int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
	int ret = 0;
	u32 __iomem *piobuf;
	u32 plen, *uhdr;
	size_t count;
	__be16 vlsllnh;

	if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
		ipath_dbg("send while not open\n");
		ret = -EINVAL;
	} else if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
		   dd->ipath_lid == 0) {
		/*
		 * lid check is for when sma hasn't yet configured
		 */
		ret = -ENETDOWN;
		ipath_cdbg(VERBOSE, "send while not ready, "
			   "mylid=%u, flags=0x%x\n",
			   dd->ipath_lid, dd->ipath_flags);
	}

	vlsllnh = *((__be16 *) hdr);
	if (vlsllnh != htons(IPS_LRH_BTH)) {
		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
			  "not sending\n", be16_to_cpu(vlsllnh),
			  IPS_LRH_BTH);
		ret = -EINVAL;
	}
	if (ret)
		goto done;

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (piobuf == NULL) {
		ret = -EBUSY;
		goto done;
	}

	plen = (sizeof(*hdr) >> 2);	/* actual length */
	ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

	writeq(plen+1, piobuf);	/* len (+1 for pad) to pbc, no flags */
	ipath_flush_wc();
	piobuf += 2;
	uhdr = (u32 *)hdr;
	count = plen-1;	/* amount we can copy before trigger word */
	__iowrite32_copy(piobuf, uhdr, count);
	ipath_flush_wc();
	__raw_writel(uhdr[count], piobuf + count);
	ipath_flush_wc();	/* ensure it's sent, now */

	ipath_stats.sps_ether_spkts++;	/* ether packet sent */

done:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);

int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);

int ipath_layer_enable_timer(struct ipath_devdata *dd)
{
	/*
	 * HT-400 has a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 (u64) (1 << 2));
	}

	init_timer(&dd->verbs_layer.l_timer);
	dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
	dd->verbs_layer.l_timer.data = (unsigned long)dd;
	dd->verbs_layer.l_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);

int ipath_layer_disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);

	del_timer_sync(&dd->verbs_layer.l_timer);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);

/**
 * ipath_layer_set_verbs_flags - set the verbs layer flags
 * @dd: the infinipath device
 * @flags: the flags to set
 */
int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
{
	struct ipath_devdata *ss;
	unsigned long lflags;

	spin_lock_irqsave(&ipath_devs_lock, lflags);

	list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
		if (!(ss->ipath_flags & IPATH_INITTED))
			continue;
		if ((flags & IPATH_VERBS_KERNEL_SMA) &&
		    !(*ss->ipath_statusp & IPATH_STATUS_SMA))
			*ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
		else
			*ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, lflags);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);

/**
 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);

/**
 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	unsigned ret;

	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		ret = 0;
	else
		ret = dd->ipath_pd[0]->port_pkeys[index];

	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);

/**
 * ipath_layer_get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		ret = 0;
		goto bail;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
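/*
 * Illustrative note: bit 15 of a PKEY is the full-membership bit, so
 * 0x8001 (full member) and 0x0001 (limited member) share the low
 * 15 bits; that shared part is lkey = key & 0x7FFF below.  The table
 * holds only one flavor of a given key at a time, and lkey == 0x7FFF
 * (the default key) is treated as always present, so adding it is a
 * no-change success.
 */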
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * ipath_layer_set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
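/*
 * Worked example (illustrative only): if the hardware table ends up as
 * ipath_pkeys[] = { 0xFFFF, 0x8001, 0, 0 }, the packing below writes
 * 0x000000008001FFFF to the partition key register (entry 0 in bits
 * 15:0, entry 1 in bits 31:16, and so on).
 */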
int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;
	u64 pkey;

	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);

/**
 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);

/**
 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
					 int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);

int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);

/**
 * ipath_layer_set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);

int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);

/**
 * ipath_layer_set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);

int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
			      size_t namelen)
{
	return dd->ipath_f_get_boardname(dd, name, namelen);
}

EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);

u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
{
	return dd->ipath_rcvhdrentsize;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);