/* drivers/staging/octeon/ethernet-rx.c */
/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/smp.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
#include <asm/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include "cvmx-helper.h"
#include "cvmx-wqe.h"
#include "cvmx-fau.h"
#include "cvmx-pow.h"
#include "cvmx-pip.h"
#include "cvmx-scratch.h"

#include "cvmx-gmxx-defs.h"
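
/*
 * Receive-side overview (editorial note, not from the original
 * sources): incoming packets are queued by the POW (Packet Order /
 * Work unit) as work-queue entries (cvmx_wqe_t) in the group selected
 * by pow_receive_group.  A single IRQ fires when work is pending; the
 * handler masks that IRQ and kicks a per-CPU NAPI instance, which then
 * pulls work entries directly from the POW until either the budget is
 * exhausted or no work remains.
 */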
struct cvm_napi_wrapper {
	struct napi_struct napi;
} ____cacheline_aligned_in_smp;

static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;

struct cvm_oct_core_state {
	int baseline_cores;
	/*
	 * The number of additional cores that could be processing
	 * input packets.
	 */
	atomic_t available_cores;
	cpumask_t cpu_state;
} ____cacheline_aligned_in_smp;

static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
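
/*
 * Editorial note on the accounting above: available_cores counts CPUs
 * that could still be woken for receive work, and cpu_state tracks
 * which CPUs are currently polling.  cvm_oct_enable_one_cpu() below
 * atomically decrements available_cores and, if the result is not
 * negative, schedules NAPI on one CPU whose cpu_state bit is clear;
 * cvm_oct_no_more_work() reverses both steps on every CPU except
 * CPU 0, which keeps the receive IRQ instead.
 */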
static void cvm_oct_enable_napi(void *_)
{
	int cpu = smp_processor_id();
	napi_schedule(&cvm_oct_napi[cpu].napi);
}
static void cvm_oct_enable_one_cpu(void)
{
	int v;
	int cpu;

	/* Check to see if more CPUs are available for receive processing... */
	v = atomic_sub_if_positive(1, &core_state.available_cores);
	if (v < 0)
		return;

	/* ... if a CPU is available, turn on NAPI polling for that CPU. */
	for_each_online_cpu(cpu) {
		if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
			v = smp_call_function_single(cpu, cvm_oct_enable_napi,
						     NULL, 0);
			if (v)
				panic("Can't enable NAPI.");
			break;
		}
	}
}
static void cvm_oct_no_more_work(void)
{
	int cpu = smp_processor_id();

	/*
	 * CPU zero is special.  It always has the irq enabled when
	 * waiting for incoming packets.
	 */
	if (cpu == 0) {
		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
		return;
	}

	cpu_clear(cpu, core_state.cpu_state);
	atomic_add(1, &core_state.available_cores);
}
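
/*
 * Editorial note on the IRQ handoff: the handler below masks the POW
 * IRQ and schedules NAPI on the interrupted CPU; the IRQ stays masked
 * while polling keeps up with the packet rate, and is re-enabled from
 * cvm_oct_no_more_work() (on CPU 0) once a poll round finishes under
 * budget.
 */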
/**
 * cvm_oct_do_interrupt - interrupt handler.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	cvm_oct_enable_napi(NULL);

	return IRQ_HANDLED;
}
/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS.  Note these packets still get
		 * counted as frame errors.
		 */
	} else if (USE_10MBPS_PREAMBLE_WORKAROUND
		   && ((work->word2.snoip.err_code == 5)
		       || (work->word2.snoip.err_code == 7))) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non-spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(work->ipprt);
		int index = cvmx_helper_get_interface_index_num(work->ipprt);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			uint8_t *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			/* Skip over the 0x55 preamble bytes. */
			while (i < work->len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/*
				  DEBUGPRINT("Port %d received 0xd5 preamble\n", work->ipprt);
				 */
				work->packet_ptr.s.addr += i + 1;
				work->len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/*
				  DEBUGPRINT("Port %d received 0x?d preamble\n", work->ipprt);
				 */
				work->packet_ptr.s.addr += i;
				work->len -= i + 4;
				for (i = 0; i < work->len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				DEBUGPRINT("Port %d unknown preamble, packet dropped\n",
					   work->ipprt);
				/*
				  cvmx_helper_dump_packet(work);
				 */
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		DEBUGPRINT("Port %d receive error code %d, packet dropped\n",
			   work->ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}
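
/*
 * Editorial note on the workaround above: when the start-of-frame
 * delimiter lands on a nibble boundary ((*ptr & 0xf) == 0xd), every
 * byte of the frame is half a byte out of alignment.  The loop
 * rebuilds byte k from the high nibble of received byte k and the low
 * nibble of received byte k + 1 (Ethernet transmits each octet low
 * nibble first), shifting the whole frame back into byte alignment in
 * place and trimming the preamble from the reported length.
 */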
/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	const int coreid = cvmx_get_core_num();
	uint64_t old_group_mask;
	uint64_t old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
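
	/*
	 * Editorial note: the low 16 bits of CVMX_POW_PP_GRP_MSKX are a
	 * bitmask of the 16 POW groups this core accepts work from; the
	 * write above clears all of them and sets only the bit for
	 * pow_receive_group (e.g. group 15 -> mask 0x8000), leaving the
	 * upper bits of the register untouched.
	 */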
	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
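		/*
		 * Editorial note: when no work is pending, the write to
		 * CVMX_POW_WQ_INT below appears to acknowledge any pending
		 * work-queue interrupt and clear the interrupt-disable
		 * state for our group, so the IRQ can fire again when new
		 * packets arrive after the loop exits.
		 */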
		if (work == NULL) {
			union cvmx_pow_wq_int wq_int;
			wq_int.u64 = 0;
			wq_int.s.iq_dis = 1 << pow_receive_group;
			wq_int.s.wq_int = 1 << pow_receive_group;
			cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			break;
		}

		pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
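
		/*
		 * Editorial note: with USE_ASYNC_IOBDMA, the request for
		 * the next work entry (just above) is issued before the
		 * current packet is processed, so the POW lookup overlaps
		 * with the sk_buff handling below and its result is
		 * collected at the top of the next loop iteration.
		 */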
		if (rx_count == 0) {
			/*
			 * First time through, see if there is enough
			 * work waiting to merit waking another
			 * CPU.
			 */
			union cvmx_pow_wq_int_cntx counts;
			int backlog;
			int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
			counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
			backlog = counts.s.iq_cnt + counts.s.ds_cnt;
			if (backlog > budget * cores_in_use && napi != NULL)
				cvm_oct_enable_one_cpu();
		}
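
		/*
		 * Worked example (added for clarity): with budget == 64 and
		 * two cores already polling, a backlog of 100 queued plus
		 * de-scheduled entries leaves 100 > 64 * 2 false, so no
		 * extra CPU is woken; at 200 entries the test passes and
		 * one more CPU joins until the backlog drains.
		 */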
		skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}
		prefetch(cvm_oct_device[work->ipprt]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
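		/*
		 * Editorial note: in the zero-copy case the hardware DMAed
		 * the frame into an FPA buffer carved out of an sk_buff's
		 * data area (pskb, stored just before the buffer, points
		 * back at that sk_buff), so skb->data can be aimed at the
		 * received bytes by translating the physical packet address
		 * into an offset from skb->head.
		 */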
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->len);
			if (!skb) {
				DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n",
					   work->ipprt);
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->len), ptr, work->len);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr = work->packet_ptr;
				int len = work->len;

				while (segments--) {
					union cvmx_buf_ptr next_ptr =
					    *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

					/*
					 * Octeon Errata PKI-100: The segment size is
					 * wrong. Until it is fixed, calculate the
					 * segment size based on the packet pool
					 * buffer size. When it is fixed, the
					 * following line should be replaced with this
					 * one: int segment_size =
					 * segment_ptr.s.size;
					 */
					int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
						(segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}
		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[work->ipprt])) {
			struct net_device *dev = cvm_oct_device[work->ipprt];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
					atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
					atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
#else
					atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
					atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
#endif
				}
				netif_receive_skb(skb);
				rx_count++;
			} else {
				/* Drop any packet received for a device that isn't up */
				/*
				  DEBUGPRINT("%s: Device not up, packet dropped\n",
				  dev->name);
				 */
#ifdef CONFIG_64BIT
				atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
#else
				atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
#endif
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n",
				   work->ipprt);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
				      DONT_WRITEBACK(1));
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}

	cvm_oct_rx_refill_pool(0);

	if (rx_count < budget && napi != NULL) {
		/* No more work */
		napi_complete(napi);
		cvm_oct_no_more_work();
	}

	return rx_count;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll device for receive packets
 * @dev:    Device to poll. Unused
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	cvm_oct_napi_poll(NULL, 16);
}
#endif
void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;
	union cvmx_pow_wq_int_thrx int_thr;
	union cvmx_pow_wq_int_pc int_pc;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (NULL == dev_for_napi)
		panic("No net_devices were allocated.");

	if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
		atomic_set(&core_state.available_cores, max_rx_cpus);
	else
		atomic_set(&core_state.available_cores, num_online_cpus());
	core_state.baseline_cores = atomic_read(&core_state.available_cores);
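
	/*
	 * Editorial note: max_rx_cpus caps how many CPUs may run receive
	 * NAPI at once.  Per the condition above, only values between 2
	 * and num_online_cpus() - 1 actually shrink the pool (e.g. 2 on
	 * an 8-CPU system limits polling to two cores); anything else
	 * lets every online CPU participate.
	 */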
	core_state.cpu_state = CPU_MASK_NONE;
	for_each_possible_cpu(i) {
		netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&cvm_oct_napi[i].napi);
	}
	/* Register an IRQ handler to receive POW interrupts */
	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n",
		      OCTEON_IRQ_WORKQ0 + pow_receive_group);

	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);

	int_thr.u64 = 0;
	int_thr.s.tc_en = 1;
	int_thr.s.tc_thr = 1;
	/* Enable POW interrupt when our port has at least one packet */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);

	int_pc.u64 = 0;
	int_pc.s.pc_thr = 5;
	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
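
	/*
	 * Editorial note: INT_THRX arms a threshold interrupt for our
	 * group (tc_thr == 1 appears to fire as soon as a single work
	 * entry is queued), while the INT_PC periodic-counter threshold
	 * paces how often the POW interrupt can re-fire.
	 */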
	/* Schedule NAPI now. This will indirectly enable interrupts. */
	cvm_oct_enable_one_cpu();
}
void cvm_oct_rx_shutdown(void)
{
	int i;

	/* Shutdown all of the NAPIs */
	for_each_possible_cpu(i)
		netif_napi_del(&cvm_oct_napi[i].napi);
}