/*
 *  Copyright (C) 2002 Intersil Americas Inc.
 *  Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/system.h>
#include <linux/if_arp.h>

#include "prismcompat.h"
#include "islpci_mgt.h"
#include "isl_oid.h"		/* additional types and defs for isl38xx fw */
#include "isl_ioctl.h"

#include <net/iw_handler.h>

/******************************************************************************
        Global variable definition section
******************************************************************************/
int pc_debug = VERBOSE;
module_param(pc_debug, int, 0);
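
/* pc_debug holds the runtime debug verbosity as a bitmask of the SHOW_*
 * flags (e.g. SHOW_BUFFER_CONTENTS below), while the VERBOSE macro sets
 * its default and the compile-time ceiling used in the
 * "#if VERBOSE > SHOW_ERROR_MESSAGES" guards throughout this file. */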

/******************************************************************************
    Driver general functions
******************************************************************************/
#if VERBOSE > SHOW_ERROR_MESSAGES
void
display_buffer(char *buffer, int length)
{
	if ((pc_debug & SHOW_BUFFER_CONTENTS) == 0)
		return;

	while (length > 0) {
		printk("[%02x]", *buffer & 255);
		length--;
		buffer++;
	}

	printk("\n");
}
#endif
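
/* Example: a four-byte buffer { 0x00, 0x1f, 0x2a, 0xff } is dumped as
 * "[00][1f][2a][ff]" followed by a newline. */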

/*****************************************************************************
    Queue handling for management frames
******************************************************************************/
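
/*
 * As used by the functions below, both management queues are simple rings
 * of ISL38XX_CB_MGMT_QSIZE fragments in the shared control block.  The
 * driver advances driver_curr_frag when it posts a buffer (rx fill, tx
 * submit) and the device advances device_curr_frag as it consumes them;
 * a ring slot is addressed by taking the running counter modulo
 * ISL38XX_CB_MGMT_QSIZE, while index_mgmt_rx/index_mgmt_tx record how far
 * the driver side has caught up with the device.
 */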

/*
 * Helper function to create a PIMFOR management frame header.
 */
static void
pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h)
{
	h->version = PIMFOR_VERSION;
	h->operation = operation;
	h->device_id = PIMFOR_DEV_ID_MHLI_MIB;
	h->flags = 0;
	h->oid = cpu_to_be32(oid);
	h->length = cpu_to_be32(length);
}
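
/*
 * For example, a GET request with no payload would be built roughly as
 *
 *	pimfor_encode_header(PIMFOR_OP_GET, oid, 0, (pimfor_header_t *) p);
 *
 * leaving oid and length in the big-endian order that is the default wire
 * format here (PIMFOR_OP_GET being one of the PIMFOR operation codes used
 * elsewhere in this driver).
 */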

/*
 * Helper function to analyze a PIMFOR management frame header.
 */
static pimfor_header_t *
pimfor_decode_header(void *data, int len)
{
	pimfor_header_t *h = data;

	while ((void *) h < data + len) {
		if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
			le32_to_cpus(&h->oid);
			le32_to_cpus(&h->length);
		} else {
			be32_to_cpus(&h->oid);
			be32_to_cpus(&h->length);
		}
		if (h->oid != OID_INL_TUNNEL)
			return h;
		h++;
	}
	return NULL;
}
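
/*
 * Note that the conversion above happens in place: oid and length end up
 * in host byte order inside the receive buffer itself.  Leading
 * OID_INL_TUNNEL headers are skipped over, and NULL is returned when no
 * other header is found within the fragment.
 */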

/*
 * Fill the receive queue for management frames with fresh buffers.
 */
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill \n");
#endif

	while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
		u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		isl38xx_fragment *frag = &cb->rx_data_mgmt[index];

		if (buf->mem == NULL) {
			buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
			if (!buf->mem) {
				printk(KERN_WARNING
				       "Error allocating management frame.\n");
				return -ENOMEM;
			}
			buf->size = MGMT_FRAME_SIZE;
		}
		if (buf->pci_addr == 0) {
			buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
						       MGMT_FRAME_SIZE,
						       PCI_DMA_FROMDEVICE);
			if (!buf->pci_addr) {
				printk(KERN_WARNING
				       "Failed to make memory DMA'able.\n");
				return -ENOMEM;
			}
		}

		/* be safe: always reset control block information */
		frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
		frag->flags = 0;
		frag->address = cpu_to_le32(buf->pci_addr);
		curr++;

		/* The fragment address in the control block must have
		 * been written before announcing the frame buffer to
		 * the device. */
		wmb();
		cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
	}
	return 0;
}
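
/* The GFP_ATOMIC allocation above is presumably chosen because this
 * function is also invoked from the receive/interrupt path to repost
 * buffers, where sleeping is not allowed. */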

/*
 * Create and transmit a management frame using "operation" and "oid",
 * with arguments data/length.
 * We either return an error and free the frame, or we return 0 and
 * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
 * interrupt.
 */
static int
islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
		    void *data, int length)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	void *p;
	int err = -EINVAL;
	unsigned long flags;
	isl38xx_fragment *frag;
	struct islpci_membuf buf;
	u32 curr_frag;
	int index;
	int frag_len = length + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n");
#endif

	if (frag_len > MGMT_FRAME_SIZE) {
		printk(KERN_DEBUG "%s: mgmt frame too large %d\n",
		       ndev->name, frag_len);
		goto error;
	}

	err = -ENOMEM;
	p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
	if (!buf.mem) {
		printk(KERN_DEBUG "%s: cannot allocate mgmt frame\n",
		       ndev->name);
		goto error;
	}
	buf.size = frag_len;

	/* create the header directly in the fragment data area */
	pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
	p += PIMFOR_HEADER_SIZE;

	if (data)
		memcpy(p, data, length);
	else
		memset(p, 0, length);

#if VERBOSE > SHOW_ERROR_MESSAGES
	{
		pimfor_header_t *h = buf.mem;
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x \n",
		      h->operation, oid, h->device_id, h->flags, length);

		/* display the buffer contents for debugging */
		display_buffer((char *) h, sizeof (pimfor_header_t));
		display_buffer(p, length);
	}
#endif

	err = -ENOMEM;
	buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len,
				      PCI_DMA_TODEVICE);
	if (!buf.pci_addr) {
		printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
		       ndev->name);
		goto error_free;
	}

	/* Protect the control block modifications against interrupts. */
	spin_lock_irqsave(&priv->slock, flags);
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]);
	if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) {
		printk(KERN_WARNING "%s: mgmt tx queue is still full\n",
		       ndev->name);
		goto error_unlock;
	}

	/* commit the frame to the tx device queue */
	index = curr_frag % ISL38XX_CB_MGMT_QSIZE;
	priv->mgmt_tx[index] = buf;
	frag = &cb->tx_data_mgmt[index];
	frag->size = cpu_to_le16(frag_len);
	frag->flags = 0;	/* for any other than the last fragment, set to 1 */
	frag->address = cpu_to_le32(buf.pci_addr);

	/* The fragment address in the control block must have
	 * been written before announcing the frame buffer to
	 * the device. */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1);
	spin_unlock_irqrestore(&priv->slock, flags);

	/* trigger the device */
	islpci_trigger(priv);
	return 0;

 error_unlock:
	spin_unlock_irqrestore(&priv->slock, flags);
 error_free:
	kfree(buf.mem);
 error:
	return err;
}
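
/* islpci_mgt_transmit() is not meant to be called directly by the rest of
 * the driver; requests normally go through islpci_mgt_transaction() below,
 * which serializes them with the mgmt_sem semaphore and waits for the
 * matching response. */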

/*
 * Receive a management frame from the device.
 * This can be an arbitrary number of traps, and at most one response
 * frame for a previous request sent via islpci_mgt_transmit().
 */
int
islpci_mgt_receive(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
#endif

	/* Only once per interrupt, determine fragment range to
	 * process.  This avoids an endless loop (i.e. lockup) if
	 * frames come in faster than we can process them. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);

	for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
		pimfor_header_t *header;
		u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		u16 frag_len;
		int size;
		struct islpci_mgmtframe *frame;

		/* I have no idea (and no documentation) if flags != 0
		 * is possible.  Drop the frame, reuse the buffer. */
		if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
			printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
			       ndev->name,
			       le16_to_cpu(cb->rx_data_mgmt[index].flags));
			continue;
		}

		/* The device only returns the size of the header(s) here. */
		frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

		/*
		 * We appear to have no way to tell the device the
		 * size of a receive buffer.  Thus, if this check
		 * triggers, we likely have kernel heap corruption. */
		if (frag_len > MGMT_FRAME_SIZE) {
			printk(KERN_WARNING
			       "%s: Bogus packet size of %d (%#x).\n",
			       ndev->name, frag_len, frag_len);
			frag_len = MGMT_FRAME_SIZE;
		}

		/* Ensure the results of device DMA are visible to the CPU. */
		pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
					    buf->size, PCI_DMA_FROMDEVICE);

		/* Perform endianess conversion for PIMFOR header in-place. */
		header = pimfor_decode_header(buf->mem, frag_len);
		if (!header) {
			printk(KERN_WARNING "%s: no PIMFOR header found\n",
			       ndev->name);
			continue;
		}

		/* The device ID from the PIMFOR packet received from
		 * the MVC is always 0.  We forward a sensible device_id.
		 * Not that anyone upstream would care... */
		header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
		      header->operation, header->oid, header->device_id,
		      header->flags, header->length);

		/* display the buffer contents for debugging */
		display_buffer((char *) header, PIMFOR_HEADER_SIZE);
		display_buffer((char *) header + PIMFOR_HEADER_SIZE,
			       header->length);
#endif

		/* nobody sends these */
		if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
			printk(KERN_DEBUG
			       "%s: errant PIMFOR application frame\n",
			       ndev->name);
			continue;
		}

		/* Determine frame size, skipping OID_INL_TUNNEL headers. */
		size = PIMFOR_HEADER_SIZE + header->length;
		frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
				GFP_ATOMIC);
		if (!frame) {
			printk(KERN_WARNING
			       "%s: Out of memory, cannot handle oid 0x%08x\n",
			       ndev->name, header->oid);
			continue;
		}
		frame->ndev = ndev;
		memcpy(&frame->buf, header, size);
		frame->header = (pimfor_header_t *) frame->buf;
		frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "frame: header: %p, data: %p, size: %d\n",
		      frame->header, frame->data, size);
#endif

		if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRAPS,
			      "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
			      header->oid, header->device_id, header->flags,
			      header->length);
#endif

			/* Create work to handle trap out of interrupt
			 * context. */
			INIT_WORK(&frame->ws, prism54_process_trap);
			schedule_work(&frame->ws);
		} else {
			/* Signal the one waiting process that a response
			 * has been received. */
			if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
				printk(KERN_WARNING
				       "%s: mgmt response not collected\n",
				       ndev->name);
				kfree(frame);
			}
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
			wake_up(&priv->mgmt_wqueue);
		}
	}

	return 0;
}
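
/* Ownership of the kmalloc'ed islpci_mgmtframe passes out of this
 * function: trap frames are expected to be freed by the scheduled work
 * handler, while a response frame is handed to the waiter in
 * islpci_mgt_transaction() through priv->mgmt_received and must be
 * released by that caller. */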

/*
 * Cleanup the transmit queue by freeing all frames handled by the device.
 */
void
islpci_mgt_cleanup_transmit(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_cleanup_transmit\n");
#endif

	/* Only once per cleanup, determine fragment range to
	 * process.  This avoids an endless loop (i.e. lockup) if
	 * the device became confused, incrementing device_curr_frag
	 * rapidly. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_TX_MGMTQ]);

	for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) {
		int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_tx[index];
		pci_unmap_single(priv->pdev, buf->pci_addr, buf->size,
				 PCI_DMA_TODEVICE);
		buf->pci_addr = 0;
		kfree(buf->mem);
		buf->mem = NULL;
		buf->size = 0;
	}
}
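
/* Per the comment above islpci_mgt_transmit(), this runs from the tx-done
 * path: every fragment the device has consumed is unmapped and its buffer
 * freed, so the corresponding tx ring slot can be reused. */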

/*
 * Perform one request-response transaction to the device.
 */
int
islpci_mgt_transaction(struct net_device *ndev,
		       int operation, unsigned long oid,
		       void *senddata, int sendlen,
		       struct islpci_mgmtframe **recvframe)
{
	islpci_private *priv = netdev_priv(ndev);
	const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
	long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
	int err;
	DEFINE_WAIT(wait);

	*recvframe = NULL;

	if (down_interruptible(&priv->mgmt_sem))
		return -ERESTARTSYS;

	prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
	err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
	if (err)
		goto out;

	err = -ETIMEDOUT;
	while (timeout_left > 0) {
		int timeleft;
		struct islpci_mgmtframe *frame;

		timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
		frame = xchg(&priv->mgmt_received, NULL);
		if (frame) {
			if (frame->header->oid == oid) {
				*recvframe = frame;
				err = 0;
				goto out;
			} else {
				printk(KERN_DEBUG
				       "%s: expecting oid 0x%x, received 0x%x.\n",
				       ndev->name, (unsigned int) oid,
				       frame->header->oid);
				kfree(frame);
				frame = NULL;
			}
		}
		if (timeleft == 0) {
			printk(KERN_DEBUG
			       "%s: timeout waiting for mgmt response %lu, "
			       "triggering device\n",
			       ndev->name, timeout_left);
			islpci_trigger(priv);
		}
		timeout_left += timeleft - wait_cycle_jiffies;
	}
	printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
	       ndev->name);

	/* TODO: we should reset the device here */
 out:
	finish_wait(&priv->mgmt_wqueue, &wait);
	up(&priv->mgmt_sem);
	return err;
}
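
/*
 * Typical use, sketched from how the OID management code drives this
 * function (the surrounding caller is illustrative, not part of this file):
 *
 *	struct islpci_mgmtframe *response = NULL;
 *	int err = islpci_mgt_transaction(ndev, PIMFOR_OP_GET, oid,
 *					 NULL, 0, &response);
 *	if (!err && response) {
 *		... inspect response->header and response->data ...
 *		kfree(response);
 *	}
 */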