/*
 *  linux/drivers/net/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT  12
#define EHEA_PAGESIZE   4096UL

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE    0x1
#define EHEA_SWQE3_TYPE    0x2
#define EHEA_RWQE2_TYPE    0x3
#define EHEA_RWQE3_TYPE    0x4
#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
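
/*
 * Illustrative sketch (not from this file): with EHEA_BMASK_SET and
 * EHEA_BMASK_GET from ehea.h, a wr_id for a format-2 send WQE could be
 * composed and later decoded like this; "count" and "index" stand for
 * hypothetical driver-side counters:
 *
 *      swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 *                  | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, count)
 *                  | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 *
 *      type = EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id);
 */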

struct ehea_vsgentry {
        u64 vaddr;
        u32 l_key;
        u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES  252
#define SWQE2_MAX_IMM            (0xD0 - 0x30)  /* 160 bytes of immediate data */
#define SWQE3_MAX_IMM            224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC                   0x8000
#define EHEA_SWQE_IP_CHECKSUM           0x4000
#define EHEA_SWQE_TCP_CHECKSUM          0x2000
#define EHEA_SWQE_TSO                   0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
#define EHEA_SWQE_VLAN_INSERT           0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
#define EHEA_SWQE_WRAP_CTL_REC          0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
#define EHEA_SWQE_BIND                  0x0020
#define EHEA_SWQE_PURGE                 0x0010
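
/*
 * Illustrative sketch (not from this file): for a TCP frame with
 * hardware checksumming, immediate data and a requested completion,
 * a sender might combine the flags above as:
 *
 *      swqe->tx_control |= EHEA_SWQE_CRC
 *                       | EHEA_SWQE_IP_CHECKSUM
 *                       | EHEA_SWQE_TCP_CHECKSUM
 *                       | EHEA_SWQE_IMM_DATA_PRESENT
 *                       | EHEA_SWQE_SIGNALLED_COMPLETION;
 */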

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE 32

struct ehea_swqe {
        u64 wr_id;
        u16 tx_control;
        u16 vlan_tag;
        u8 reserved1;
        u8 ip_start;
        u8 ip_end;
        u8 immediate_data_length;
        u8 tcp_offset;
        u8 reserved2;
        u16 tcp_end;
        u8 wrap_tag;
        u8 descriptors;         /* number of valid descriptors in WQE */
        u16 reserved3;
        u16 reserved4;
        u16 mss;
        u32 reserved5;
        union {
                /*  Send WQE Format 1 */
                struct {
                        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
                } no_immediate_data;

                /*  Send WQE Format 2 */
                struct {
                        struct ehea_vsgentry sg_entry;
                        /* 0x30 */
                        u8 immediate_data[SWQE2_MAX_IMM];
                        /* 0xd0 */
                        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
                } immdata_desc __attribute__ ((packed));

                /*  Send WQE Format 3 */
                struct {
                        u8 immediate_data[SWQE3_MAX_IMM];
                } immdata_nodesc;
        } u;
};

struct ehea_rwqe {
        u64 wr_id;              /* work request ID */
        u8 reserved1[5];
        u8 data_segments;
        u16 reserved2;
        u64 reserved3;
        u64 reserved4;
        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT    0x0400

#define EHEA_CQE_TYPE_RQ            0x60
#define EHEA_CQE_STAT_ERR_MASK      0x721F
#define EHEA_CQE_STAT_FAT_ERR_MASK  0x1F
#define EHEA_CQE_STAT_ERR_TCP       0x4000
#define EHEA_CQE_STAT_ERR_IP        0x2000
#define EHEA_CQE_STAT_ERR_CRC       0x1000
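
/*
 * Illustrative sketch (not from this file): a receive path could
 * classify a completion with the masks above, e.g.
 *
 *      if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
 *              if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
 *                      ...     // bad frame checksum: drop the packet
 *              if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK)
 *                      ...     // fatal error: reset the queue pair
 *      }
 */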

struct ehea_cqe {
        u64 wr_id;              /* work request ID from WQE */
        u8 type;
        u8 valid;
        u16 status;
        u16 reserved1;
        u16 num_bytes_transfered;
        u16 vlan_tag;
        u16 inet_checksum_value;
        u8 reserved2;
        u8 header_length;
        u16 reserved3;
        u16 page_offset;
        u16 wqe_count;
        u32 qp_token;
        u32 timestamp;
        u32 reserved4;
        u64 reserved5[3];
};

#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)
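
/*
 * Illustrative sketch (not from this file): an event handler could
 * decode the 64-bit EQE via EHEA_BMASK_GET from ehea.h, e.g.
 *
 *      if (EHEA_BMASK_GET(EHEA_EQE_VALID, eqe->entry)) {
 *              qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
 *              ...
 *      }
 */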

struct ehea_eqe {
        u64 entry;
};

#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)

static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
        struct ehea_page *current_page;

        if (q_offset >= queue->queue_length)
                q_offset -= queue->queue_length;
        current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
        return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}

static inline void *hw_qeit_get(struct hw_queue *queue)
{
        return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
        queue->current_q_offset += queue->qe_size;
        if (queue->current_q_offset >= queue->queue_length) {
                queue->current_q_offset = 0;
                /* toggle the valid flag */
                queue->toggle_state = (~queue->toggle_state) & 1;
        }
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);
        hw_qeit_inc(queue);
        return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
        struct ehea_cqe *retvalue = hw_qeit_get(queue);
        u8 valid = retvalue->valid;
        void *pref;

        if ((valid >> 7) == (queue->toggle_state & 1)) {
                /* this is a good one */
                hw_qeit_inc(queue);
                pref = hw_qeit_calc(queue, queue->current_q_offset);
                prefetch(pref);
                prefetch(pref + 128);
        } else
                retvalue = NULL;
        return retvalue;
}
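
/*
 * Note on the valid/toggle protocol used above: the top bit of an
 * entry's valid byte flips on every pass through the ring, and an entry
 * is new exactly when that bit matches the queue's toggle_state, which
 * hw_qeit_inc() flips each time current_q_offset wraps to 0.
 */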

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
        struct ehea_cqe *retvalue = hw_qeit_get(queue);
        void *pref;
        u8 valid;

        pref = hw_qeit_calc(queue, queue->current_q_offset);
        prefetch(pref);
        prefetch(pref + 128);
        prefetch(pref + 256);
        valid = retvalue->valid;
        if (!((valid >> 7) == (queue->toggle_state & 1)))
                retvalue = NULL;
        return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
        queue->current_q_offset = 0;
        return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
        u64 last_entry_in_q = queue->queue_length - queue->qe_size;
        void *retvalue;

        retvalue = hw_qeit_get(queue);
        queue->current_q_offset += queue->qe_size;
        if (queue->current_q_offset > last_entry_in_q) {
                queue->current_q_offset = 0;
                queue->toggle_state = (~queue->toggle_state) & 1;
        }
        return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);
        u32 qe = *(u8*)retvalue;
        if ((qe >> 7) == (queue->toggle_state & 1))
                hw_qeit_eq_get_inc(queue);
        else
                retvalue = NULL;
        return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
                                                   int rq_nr)
{
        struct hw_queue *queue;

        if (rq_nr == 1)
                queue = &qp->hw_rqueue1;
        else if (rq_nr == 2)
                queue = &qp->hw_rqueue2;
        else
                queue = &qp->hw_rqueue3;

        return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
                                              int *wqe_index)
{
        struct hw_queue *queue = &my_qp->hw_squeue;
        struct ehea_swqe *wqe_p;

        *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
        wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

        return wqe_p;
}

static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
        iosync();
        ehea_update_sqa(my_qp, 1);
}
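
/*
 * Illustrative send-path sketch (not from this file), using only the
 * helpers declared here; the real transmit logic lives in ehea_main.c:
 *
 *      swqe = ehea_get_swqe(qp, &index);
 *      swqe->wr_id = ...;              // see EHEA_WR_ID_* above
 *      swqe->tx_control = ...;         // see EHEA_SWQE_* flags above
 *      ehea_post_swqe(qp, swqe);       // iosync, then ring the doorbell
 */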

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
        struct hw_queue *queue = &qp->hw_rqueue1;

        *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
        return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
        hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
        hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
        return hw_qeit_get_valid(&my_cq->hw_queue);
}
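
/*
 * Illustrative poll-loop sketch: ehea_poll_cq() returns NULL once the
 * valid bit no longer matches the toggle state, so completions can be
 * consumed as:
 *
 *      while ((cqe = ehea_poll_cq(cq)) != NULL) {
 *              ...                     // handle cqe->wr_id, cqe->status
 *              ehea_inc_cq(cq);
 *      }
 */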

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
        EHEA_EQ = 0,            /* event queue              */
        EHEA_NEQ                /* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                               enum ehea_eq_type type,
                               const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
                               u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
                               struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
                 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);

#endif  /* __EHEA_QMR_H__ */