/* drivers/infiniband/hw/ipath/ipath_layer.c */
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */
#include <linux/io.h>
#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ipath_layer.h"
#include "ipath_verbs.h"
#include "ipath_common.h"
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
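
/*
 * A single layered driver registers these callbacks at a time; they
 * are set and cleared under ipath_layer_mutex.  The double-underscore
 * helpers below call into the layer without taking the mutex; the
 * plain-named wrappers take it first.
 */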
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_intr)
                ret = layer_intr(dd->ipath_layer.l_arg, arg);

        return ret;
}
int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
        int ret;

        mutex_lock(&ipath_layer_mutex);

        ret = __ipath_layer_intr(dd, arg);

        mutex_unlock(&ipath_layer_mutex);

        return ret;
}
int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
                      struct sk_buff *skb)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_rcv)
                ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

        return ret;
}
int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_rcv_lid)
                ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

        return ret;
}
void ipath_layer_lid_changed(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (dd->ipath_layer.l_arg && layer_intr)
                layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

        mutex_unlock(&ipath_layer_mutex);
}
void ipath_layer_add(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (layer_add_one)
                dd->ipath_layer.l_arg =
                        layer_add_one(dd->ipath_unit, dd);

        mutex_unlock(&ipath_layer_mutex);
}
void ipath_layer_remove(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (dd->ipath_layer.l_arg && layer_remove_one) {
                layer_remove_one(dd->ipath_layer.l_arg);
                dd->ipath_layer.l_arg = NULL;
        }

        mutex_unlock(&ipath_layer_mutex);
}
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
                         void (*l_remove)(void *),
                         int (*l_intr)(void *, u32),
                         int (*l_rcv)(void *, void *, struct sk_buff *),
                         u16 l_rcv_opcode,
                         int (*l_rcv_lid)(void *, void *))
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);

        layer_add_one = l_add;
        layer_remove_one = l_remove;
        layer_intr = l_intr;
        layer_rcv = l_rcv;
        layer_rcv_lid = l_rcv_lid;
        ipath_layer_rcv_opcode = l_rcv_opcode;

        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (!(dd->ipath_flags & IPATH_INITTED))
                        continue;

                if (dd->ipath_layer.l_arg)
                        continue;

                /* drop the spinlock around the l_add callback */
                spin_unlock_irqrestore(&ipath_devs_lock, flags);
                dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
                spin_lock_irqsave(&ipath_devs_lock, flags);
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);
        mutex_unlock(&ipath_layer_mutex);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);
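
/*
 * Registration sketch for a hypothetical layered driver (the names
 * below are illustrative, not part of this file):
 *
 *      static void *my_add_one(int unit, struct ipath_devdata *dd)
 *      {
 *              return my_alloc_state(unit, dd);  (becomes l_arg)
 *      }
 *      static void my_remove_one(void *arg) { my_free_state(arg); }
 *      static int my_intr(void *arg, u32 what) { return 0; }
 *      static int my_rcv(void *arg, void *hdr, struct sk_buff *skb)
 *      { return 0; }
 *      static int my_rcv_lid(void *arg, void *hdr) { return 0; }
 *
 *      ipath_layer_register(my_add_one, my_remove_one, my_intr,
 *                           my_rcv, MY_RCV_OPCODE, my_rcv_lid);
 *
 * l_add runs immediately for every device already IPATH_INITTED, and
 * again from ipath_layer_add() for devices that come up later.
 */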
void ipath_layer_unregister(void)
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);
        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (dd->ipath_layer.l_arg && layer_remove_one) {
                        spin_unlock_irqrestore(&ipath_devs_lock, flags);
                        layer_remove_one(dd->ipath_layer.l_arg);
                        spin_lock_irqsave(&ipath_devs_lock, flags);
                        dd->ipath_layer.l_arg = NULL;
                }
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);

        layer_add_one = NULL;
        layer_remove_one = NULL;
        layer_intr = NULL;
        layer_rcv = NULL;
        layer_rcv_lid = NULL;

        mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);
int ipath_layer_open(struct ipath_devdata *dd, u32 *pktmax)
{
        int ret;
        u32 intval = 0;

        mutex_lock(&ipath_layer_mutex);

        if (!dd->ipath_layer.l_arg) {
                ret = -EINVAL;
                goto bail;
        }

        ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);

        if (ret < 0)
                goto bail;

        *pktmax = dd->ipath_ibmaxlen;

        if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
                intval |= IPATH_LAYER_INT_IF_UP;
        if (dd->ipath_lid)
                intval |= IPATH_LAYER_INT_LID;
        if (dd->ipath_mlid)
                intval |= IPATH_LAYER_INT_BCAST;
        /*
         * Do this on open, in case the low-level driver is already up
         * and the layered driver was just reloaded, etc.
         */
        if (intval)
                layer_intr(dd->ipath_layer.l_arg, intval);

        ret = 0;
bail:
        mutex_unlock(&ipath_layer_mutex);

        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);
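
/*
 * Hypothetical caller sketch: once registration has supplied an
 * l_arg, the layered driver opens the device and sizes its transmit
 * path from the returned limit:
 *
 *      u32 pktmax;
 *
 *      if (!ipath_layer_open(dd, &pktmax))
 *              ... largest packet must fit in pktmax bytes ...
 *
 * pktmax comes from dd->ipath_ibmaxlen, the IB maximum packet length.
 */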
u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
        return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * This is the EUI-64 OUI octets (top 3), then
 * skip the next 2 (which should both be zero or 0xff).
 * The returned MAC is in network order.
 * mac points to at least 6 bytes of buffer.
 * We assume that by the time the LID is set, the GUID is as valid
 * as it is ever going to be, rather than adding yet another status bit.
 */

int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *mac)
{
        u8 *guid;

        guid = (u8 *) &dd->ipath_guid;

        mac[0] = guid[0];
        mac[1] = guid[1];
        mac[2] = guid[2];
        mac[3] = guid[5];
        mac[4] = guid[6];
        mac[5] = guid[7];
        if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
                ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
                          "%x %x\n", guid[3], guid[4]);
        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
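
/*
 * Worked example (hypothetical GUID): a GUID of
 * 00:11:75:00:00:aa:bb:cc in network order yields the MAC
 * 00:11:75:aa:bb:cc; 00:11:75:ff:ff:aa:bb:cc maps to the same MAC.
 * Any other values in bytes 3 and 4 only produce the debug warning
 * above; the MAC is still assembled from bytes 0-2 and 5-7.
 */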
u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
        return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
        int ret = 0;
        u32 __iomem *piobuf;
        u32 plen, *uhdr;
        size_t count;
        __be16 vlsllnh;

        if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
                ipath_dbg("send while not open\n");
                ret = -EINVAL;
        } else
                if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
                    dd->ipath_lid == 0) {
                        /*
                         * The LID check is for when the SMA hasn't yet
                         * configured one.
                         */
                        ret = -ENETDOWN;
                        ipath_cdbg(VERBOSE, "send while not ready, "
                                   "mylid=%u, flags=0x%x\n",
                                   dd->ipath_lid, dd->ipath_flags);
                }

        vlsllnh = *((__be16 *) hdr);
        if (vlsllnh != htons(IPATH_LRH_BTH)) {
                ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
                          "not sending\n", be16_to_cpu(vlsllnh),
                          IPATH_LRH_BTH);
                ret = -EINVAL;
        }
        if (ret)
                goto done;

        /* Get a PIO buffer to use. */
        piobuf = ipath_getpiobuf(dd, NULL);
        if (piobuf == NULL) {
                ret = -EBUSY;
                goto done;
        }

        plen = (sizeof(*hdr) >> 2); /* actual length */
        ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

        writeq(plen + 1, piobuf); /* len (+1 for pad) to pbc, no flags */
        ipath_flush_wc();
        piobuf += 2;
        uhdr = (u32 *) hdr;
        count = plen - 1; /* amount we can copy before trigger word */
        __iowrite32_copy(piobuf, uhdr, count);
        ipath_flush_wc();
        __raw_writel(uhdr[count], piobuf + count);
        ipath_flush_wc(); /* ensure it's sent, now */

        ipath_stats.sps_ether_spkts++; /* ether packet sent */

done:
        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
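
/*
 * A note on the PIO sequence above (inferred from the code): the last
 * word of the packet acts as the send trigger, so the routine writes
 * the PBC length word first, copies all but the final header word,
 * and only then writes that final word, with write-combining flushes
 * (ipath_flush_wc()) between each step so the device sees the stores
 * in order.
 */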
int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
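
/*
 * Typical caller pattern (a sketch, not taken from this file): when
 * ipath_layer_send_hdr() fails with -EBUSY because no PIO buffer was
 * available, the layered driver can call
 * ipath_layer_set_piointbufavail_int() to arm the buffer-available
 * interrupt and retry the send when its l_intr callback fires.
 */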