ehea: Allocate large enough skbs to avoid partial cacheline DMA writes
drivers/net/ethernet/ibm/ehea/ehea.h
/*
 *  linux/drivers/net/ehea/ehea.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_H__
#define __EHEA_H__
#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>

#include <asm/ibmebus.h>
#include <asm/abs_addr.h>
#include <asm/io.h>
#define DRV_NAME	"ehea"
#define DRV_VERSION	"EHEA_0107"

/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM	1
#define DLPAR_MEM_ADD		2
#define DLPAR_MEM_REM		4
#define EHEA_CAPABILITIES	(DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)

#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EHEA_MAX_ENTRIES_RQ1	32767
#define EHEA_MAX_ENTRIES_RQ2	16383
#define EHEA_MAX_ENTRIES_RQ3	16383
#define EHEA_MAX_ENTRIES_SQ	32767
#define EHEA_MIN_ENTRIES_QP	127

#define EHEA_SMALL_QUEUES
#define EHEA_LRO_MAX_AGGR	64
#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT	1023
#define EHEA_DEF_ENTRIES_SQ	1023
#define EHEA_DEF_ENTRIES_RQ1	4095
#define EHEA_DEF_ENTRIES_RQ2	1023
#define EHEA_DEF_ENTRIES_RQ3	1023
#else
#define EHEA_MAX_CQE_COUNT	4080
#define EHEA_DEF_ENTRIES_SQ	4080
#define EHEA_DEF_ENTRIES_RQ1	8160
#define EHEA_DEF_ENTRIES_RQ2	2040
#define EHEA_DEF_ENTRIES_RQ3	2040
#endif
#define EHEA_MAX_ENTRIES_EQ	20

#define EHEA_SG_SQ	2
#define EHEA_SG_RQ1	1
#define EHEA_SG_RQ2	0
#define EHEA_SG_RQ3	0

#define EHEA_MAX_PACKET_SIZE	9022	/* for jumbo frames */
#define EHEA_RQ2_PKT_SIZE	2048
#define EHEA_L_PKT_SIZE		256	/* low latency */

#define MAX_LRO_DESCRIPTORS	8

/* Send completion signaling */

/* Protection Domain Identifier */
#define EHEA_PD_ID	0xaabcdeff

#define EHEA_RQ2_THRESHOLD	1
#define EHEA_RQ3_THRESHOLD	4	/* use RQ3 threshold of 2048 bytes */
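/*
 * Added note (not in the original header): incoming frames are steered by
 * size across the three receive queues; roughly, frames up to
 * EHEA_L_PKT_SIZE land on RQ1, frames up to EHEA_RQ2_PKT_SIZE on RQ2, and
 * larger (jumbo) frames on RQ3. The *_THRESHOLD values above are handed to
 * the firmware through struct ehea_qp_init_attr (rq2_threshold /
 * rq3_threshold below) to control that steering.
 */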
#define EHEA_SPEED_10G		10000
#define EHEA_SPEED_1G		1000
#define EHEA_SPEED_100M		100
#define EHEA_SPEED_10M		10
#define EHEA_SPEED_AUTONEG	0

/* Broadcast/Multicast registration types */
#define EHEA_BCMC_SCOPE_ALL	0x08
#define EHEA_BCMC_SCOPE_SINGLE	0x00
#define EHEA_BCMC_MULTICAST	0x04
#define EHEA_BCMC_BROADCAST	0x00
#define EHEA_BCMC_UNTAGGED	0x02
#define EHEA_BCMC_TAGGED	0x00
#define EHEA_BCMC_VLANID_ALL	0x01
#define EHEA_BCMC_VLANID_SINGLE	0x00

#define EHEA_CACHE_LINE		128
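/*
 * Added note (not in the original header): per the patch title above, RX skb
 * data areas are presumably sized to whole multiples of this 128-byte line so
 * the HEA never issues a partial cacheline DMA write. A helper of the
 * following shape would do the rounding; it is only a sketch and the name is
 * hypothetical. ALIGN() comes from <linux/kernel.h>.
 */
static inline int ehea_cacheline_align(int len)
{
	return ALIGN(len, EHEA_CACHE_LINE);	/* round up to a 128B multiple */
}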
/* Memory Regions */
#define EHEA_MR_ACC_CTRL	0x00800000

#define EHEA_BUSMAP_START	0x8000000000000000ULL
#define EHEA_INVAL_ADDR		0xFFFFFFFFFFFFFFFFULL
#define EHEA_DIR_INDEX_SHIFT	13			/* 8k Entries in 64k block */
#define EHEA_TOP_INDEX_SHIFT	(EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES	(1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_MAP_SIZE		(0x10000)		/* currently fixed map size */
#define EHEA_INDEX_MASK		(EHEA_MAP_ENTRIES - 1)
#define EHEA_WATCH_DOG_TIMEOUT 10*HZ

/* utility functions */

void ehea_dump(void *adr, int len, char *msg);

#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))

#define EHEA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))

#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

#define EHEA_BMASK_MASK(mask) \
	(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

#define EHEA_BMASK_SET(mask, value) \
	((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))

#define EHEA_BMASK_GET(mask, value) \
	(EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
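/*
 * Worked example (added for illustration, not in the original header): a
 * field spanning IBM-numbered bits 40..63 of a 64-bit word (bit 0 is the
 * MSB) is described by EHEA_BMASK_IBM(40, 63), which encodes shift position
 * 0 and length 24. EHEA_BMASK_SET() masks the value to 24 bits and shifts it
 * into place, and EHEA_BMASK_GET() reverses the operation:
 *
 *	#define EXAMPLE_FIELD	EHEA_BMASK_IBM(40, 63)	// hypothetical field
 *	u64 reg = EHEA_BMASK_SET(EXAMPLE_FIELD, 0x1234);
 *	u64 val = EHEA_BMASK_GET(EXAMPLE_FIELD, reg);	// val == 0x1234
 */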
/*
 * Generic ehea page
 */
struct ehea_page {
	u8 entries[PAGE_SIZE];
};

/*
 * Generic queue in linux kernel virtual memory
 */
struct hw_queue {
	u64 current_q_offset;		/* current queue entry */
	struct ehea_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;			/* queue entry size */
	u32 queue_length;		/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;		/* toggle flag - per page */
	u32 reserved;			/* 64 bit alignment */
};
/*
 * For pSeries this is a 64bit memory address where
 * I/O memory is mapped into CPU address space
 */
struct h_epa {
	void __iomem *addr;
};

struct h_epa_user {
	u64 addr;
};

struct h_epas {
	struct h_epa kernel;	/* kernel space accessible resource,
				   set to 0 if unused */
	struct h_epa_user user;	/* user space accessible resource
				   set to 0 if unused */
};
/*
 * Memory map data structures
 */
struct ehea_dir_bmap
{
	u64 ent[EHEA_MAP_ENTRIES];
};
struct ehea_top_bmap
{
	struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
};
struct ehea_bmap
{
	struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
};
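/*
 * Added note (not in the original header): the busmap is a three-level
 * lookup table; a section index resolves roughly as
 *	top[(idx >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK]
 *		->dir[(idx >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK]
 *		->ent[idx & EHEA_INDEX_MASK]
 * (this is only a sketch; see ehea_qmr.c for the authoritative walk).
 */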
struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
struct ehea_port;
struct ehea_av;
/*
 * Queue attributes passed to ehea_create_qp()
 */
struct ehea_qp_init_attr {
	/* input parameter */
	u32 qp_token;		/* queue token */
	u8 low_lat_rq1;
	u8 signalingtype;	/* cqe generation flag */
	u8 rq_count;		/* num of receive queues */
	u8 eqe_gen;		/* eqe generation flag */
	u16 max_nr_send_wqes;	/* max number of send wqes */
	u16 max_nr_rwqes_rq1;	/* max number of receive wqes */
	u16 max_nr_rwqes_rq2;
	u16 max_nr_rwqes_rq3;
	u8 wqe_size_enc_sq;
	u8 wqe_size_enc_rq1;
	u8 wqe_size_enc_rq2;
	u8 wqe_size_enc_rq3;
	u8 swqe_imm_data_len;	/* immediate data length for swqes */
	u16 port_nr;
	u16 rq2_threshold;
	u16 rq3_threshold;
	u64 send_cq_handle;
	u64 recv_cq_handle;
	u64 aff_eq_handle;

	/* output parameter */
	u32 qp_nr;
	u16 act_nr_send_wqes;
	u16 act_nr_rwqes_rq1;
	u16 act_nr_rwqes_rq2;
	u16 act_nr_rwqes_rq3;
	u8 act_wqe_size_enc_sq;
	u8 act_wqe_size_enc_rq1;
	u8 act_wqe_size_enc_rq2;
	u8 act_wqe_size_enc_rq3;
	u32 nr_sq_pages;
	u32 nr_rq1_pages;
	u32 nr_rq2_pages;
	u32 nr_rq3_pages;
	u32 liobn_sq;
	u32 liobn_rq1;
	u32 liobn_rq2;
	u32 liobn_rq3;
};
/*
 * Event Queue attributes, passed as parameter
 */
struct ehea_eq_attr {
	u32 type;
	u32 max_nr_of_eqes;
	u8 eqe_gen;		/* generate eqe flag */
	u64 eq_handle;
	u32 act_nr_of_eqes;
	u32 nr_pages;
	u32 ist1;		/* Interrupt service token */
	u32 ist2;
	u32 ist3;
	u32 ist4;
};
/*
 * Event Queue
 */
struct ehea_eq {
	struct ehea_adapter *adapter;
	struct hw_queue hw_queue;
	u64 fw_handle;
	struct h_epas epas;
	spinlock_t spinlock;
	struct ehea_eq_attr attr;
};

/*
 * HEA Queues
 */
struct ehea_qp {
	struct ehea_adapter *adapter;
	u64 fw_handle;		/* QP handle for firmware calls */
	struct hw_queue hw_squeue;
	struct hw_queue hw_rqueue1;
	struct hw_queue hw_rqueue2;
	struct hw_queue hw_rqueue3;
	struct h_epas epas;
	struct ehea_qp_init_attr init_attr;
};
/*
 * Completion Queue attributes
 */
struct ehea_cq_attr {
	/* input parameter */
	u32 max_nr_of_cqes;
	u32 cq_token;
	u64 eq_handle;

	/* output parameter */
	u32 act_nr_of_cqes;
	u32 nr_pages;
};

/*
 * Completion Queue
 */
struct ehea_cq {
	struct ehea_adapter *adapter;
	u64 fw_handle;
	struct hw_queue hw_queue;
	struct h_epas epas;
	struct ehea_cq_attr attr;
};
/*
 * Memory Region
 */
struct ehea_mr {
	struct ehea_adapter *adapter;
	u64 handle;
	u64 vaddr;
	u32 lkey;
};

/*
 * Port state information
 */
struct port_stats {
	int poll_receive_errors;
	int queue_stopped;
	int err_tcp_cksum;
	int err_ip_cksum;
	int err_frame_crc;
};

#define EHEA_IRQ_NAME_SIZE 20
/*
 * Queue SKB Array
 */
struct ehea_q_skb_arr {
	struct sk_buff **arr;	/* skb array for queue */
	int len;		/* array length */
	int index;		/* array index */
	int os_skbs;		/* rq2/rq3 only: outstanding skbs */
};

/*
 * Port resources
 */
struct ehea_port_res {
	struct napi_struct napi;
	struct port_stats p_stats;
	struct ehea_mr send_mr;		/* send memory region */
	struct ehea_mr recv_mr;		/* receive memory region */
	struct ehea_port *port;
	char int_recv_name[EHEA_IRQ_NAME_SIZE];
	char int_send_name[EHEA_IRQ_NAME_SIZE];
	struct ehea_qp *qp;
	struct ehea_cq *send_cq;
	struct ehea_cq *recv_cq;
	struct ehea_eq *eq;
	struct ehea_q_skb_arr rq1_skba;
	struct ehea_q_skb_arr rq2_skba;
	struct ehea_q_skb_arr rq3_skba;
	struct ehea_q_skb_arr sq_skba;
	int sq_skba_size;
	int swqe_refill_th;
	atomic_t swqe_avail;
	int swqe_ll_count;
	u32 swqe_id_counter;
	u64 tx_packets;
	u64 tx_bytes;
	u64 rx_packets;
	u64 rx_bytes;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
	int sq_restart_flag;
};
#define EHEA_MAX_PORTS 16

#define EHEA_NUM_PORTRES_FW_HANDLES	6	/* QP handle, SendCQ handle,
						   RecvCQ handle, EQ handle,
						   SendMR handle, RecvMR handle */
#define EHEA_NUM_PORT_FW_HANDLES	1	/* EQ handle */
#define EHEA_NUM_ADAPTER_FW_HANDLES	2	/* MR handle, NEQ handle */

struct ehea_adapter {
	u64 handle;
	struct platform_device *ofdev;
	struct ehea_port *port[EHEA_MAX_PORTS];
	struct ehea_eq *neq;	/* notification event queue */
	struct tasklet_struct neq_tasklet;
	struct ehea_mr mr;
	u32 pd;			/* protection domain */
	u64 max_mc_mac;		/* max number of multicast mac addresses */
	int active_ports;
	struct list_head list;
};
struct ehea_mc_list {
	struct list_head list;
	u64 macaddr;
};

/* kdump support */
struct ehea_fw_handle_entry {
	u64 adh;		/* Adapter Handle */
	u64 fwh;		/* Firmware Handle */
};

struct ehea_fw_handle_array {
	struct ehea_fw_handle_entry *arr;
	int num_entries;
	struct mutex lock;
};

struct ehea_bcmc_reg_entry {
	u64 adh;		/* Adapter Handle */
	u32 port_id;		/* Logical Port Id */
	u8 reg_type;		/* Registration Type */
	u64 macaddr;
};

struct ehea_bcmc_reg_array {
	struct ehea_bcmc_reg_entry *arr;
	int num_entries;
	spinlock_t lock;
};
#define EHEA_PORT_UP		1
#define EHEA_PORT_DOWN		0
#define EHEA_PHY_LINK_UP	1
#define EHEA_PHY_LINK_DOWN	0
#define EHEA_MAX_PORT_RES	16
struct ehea_port {
	struct ehea_adapter *adapter;	/* adapter that owns this port */
	struct net_device *netdev;
	struct net_device_stats stats;
	struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
	struct platform_device ofdev;	/* Open Firmware Device */
	struct ehea_mc_list *mc_list;	/* Multicast MAC addresses */
	struct ehea_eq *qp_eq;
	struct work_struct reset_task;
	struct delayed_work stats_work;
	struct mutex port_lock;
	char int_aff_name[EHEA_IRQ_NAME_SIZE];
	int allmulti;			/* Indicates IFF_ALLMULTI state */
	int promisc;			/* Indicates IFF_PROMISC state */
	int num_mcs;
	int resets;
	unsigned long flags;
	u64 mac_addr;
	u32 logical_port_id;
	u32 port_speed;
	u32 msg_enable;
	u32 sig_comp_iv;
	u32 state;
	u32 lro_max_aggr;
	u8 phy_link;
	u8 full_duplex;
	u8 autoneg;
	u8 num_def_qps;
	wait_queue_head_t swqe_avail_wq;
	wait_queue_head_t restart_wq;
};
struct port_res_cfg {
	int max_entries_rcq;
	int max_entries_scq;
	int max_entries_sq;
	int max_entries_rq1;
	int max_entries_rq2;
	int max_entries_rq3;
};

enum ehea_flag_bits {
	__EHEA_STOP_XFER,
	__EHEA_DISABLE_PORT_RESET
};

void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);

#endif	/* __EHEA_H__ */