/*
 * IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 * internal queue handling
 *
 * Authors: Waleri Fomin <fomin@de.ibm.com>
 *          Reinhard Ernst <rernst@de.ibm.com>
 *          Christoph Raisch <raisch@de.ibm.com>
 *
 * Copyright (c) 2005 IBM Corporation
 *
 * All rights reserved.
 *
 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
 * BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
43 #ifndef __IPZ_PT_FN_H__
44 #define __IPZ_PT_FN_H__
46 #define EHCA_PAGESHIFT 12
47 #define EHCA_PAGESIZE 4096UL
48 #define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
49 #define EHCA_PT_ENTRIES 512UL
51 #include "ehca_tools.h"
54 /* struct generic ehca page */
56 u8 entries
[EHCA_PAGESIZE
];
59 /* struct generic queue in linux kernel virtual memory (kv) */
61 u64 current_q_offset
; /* current queue entry */
63 struct ipz_page
**queue_pages
; /* array of pages belonging to queue */
64 u32 qe_size
; /* queue entry size */
66 u32 queue_length
; /* queue length allocated in bytes */
68 u32 toggle_state
; /* toggle flag - per page */
69 u32 dummy3
; /* 64 bit alignment */
73 * return current Queue Entry for a certain q_offset
74 * returns address (kv) of Queue Entry
76 static inline void *ipz_qeit_calc(struct ipz_queue
*queue
, u64 q_offset
)
78 struct ipz_page
*current_page
;
79 if (q_offset
>= queue
->queue_length
)
81 current_page
= (queue
->queue_pages
)[q_offset
>> EHCA_PAGESHIFT
];
82 return ¤t_page
->entries
[q_offset
& (EHCA_PAGESIZE
- 1)];
86 * return current Queue Entry
87 * returns address (kv) of Queue Entry
89 static inline void *ipz_qeit_get(struct ipz_queue
*queue
)
91 return ipz_qeit_calc(queue
, queue
->current_q_offset
);
/*
 * return current Queue Page, increment Queue Page iterator from
 * page to page in struct ipz_queue, last increment will return 0! and
 * NOT wrap
 * returns address (kv) of Queue Page
 * warning don't use in parallel with ipz_QE_get_inc()
 */
void *ipz_qpageit_get_inc(struct ipz_queue *queue);
104 * return current Queue Entry, increment Queue Entry iterator by one
105 * step in struct ipz_queue, will wrap in ringbuffer
106 * returns address (kv) of Queue Entry BEFORE increment
107 * warning don't use in parallel with ipz_qpageit_get_inc()
108 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
110 static inline void *ipz_qeit_get_inc(struct ipz_queue
*queue
)
112 void *ret
= ipz_qeit_get(queue
);
113 queue
->current_q_offset
+= queue
->qe_size
;
114 if (queue
->current_q_offset
>= queue
->queue_length
) {
115 queue
->current_q_offset
= 0;
116 /* toggle the valid flag */
117 queue
->toggle_state
= (~queue
->toggle_state
) & 1;
124 * return current Queue Entry, increment Queue Entry iterator by one
125 * step in struct ipz_queue, will wrap in ringbuffer
126 * returns address (kv) of Queue Entry BEFORE increment
127 * returns 0 and does not increment, if wrong valid state
128 * warning don't use in parallel with ipz_qpageit_get_inc()
129 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
131 static inline void *ipz_qeit_get_inc_valid(struct ipz_queue
*queue
)
133 struct ehca_cqe
*cqe
= ipz_qeit_get(queue
);
134 u32 cqe_flags
= cqe
->cqe_flags
;
136 if ((cqe_flags
>> 7) != (queue
->toggle_state
& 1))
139 ipz_qeit_get_inc(queue
);
143 static inline int ipz_qeit_is_valid(struct ipz_queue
*queue
)
145 struct ehca_cqe
*cqe
= ipz_qeit_get(queue
);
146 u32 cqe_flags
= cqe
->cqe_flags
;
148 return cqe_flags
>> 7 == (queue
->toggle_state
& 1);
152 * returns and resets Queue Entry iterator
153 * returns address (kv) of first Queue Entry
155 static inline void *ipz_qeit_reset(struct ipz_queue
*queue
)
157 queue
->current_q_offset
= 0;
158 return ipz_qeit_get(queue
);
162 * return the q_offset corresponding to an absolute address
164 int ipz_queue_abs_to_offset(struct ipz_queue
*queue
, u64 addr
, u64
*q_offset
);
167 * return the next queue offset. don't modify the queue.
169 static inline u64
ipz_queue_advance_offset(struct ipz_queue
*queue
, u64 offset
)
171 offset
+= queue
->qe_size
;
172 if (offset
>= queue
->queue_length
) offset
= 0;
176 /* struct generic page table */
178 u64 entries
[EHCA_PT_ENTRIES
];
181 /* struct page table for a queue, only to be used in pf */
183 /* queue page tables (kv), use u64 because we know the element length */
186 u32 n_ptes
; /* number of page table entries */
187 u64
*current_pte_addr
;
191 * constructor for a ipz_queue_t, placement new for ipz_queue_t,
192 * new for all dependent datastructors
193 * all QP Tables are the same
197 * returns true if ok, false if out of memory
199 int ipz_queue_ctor(struct ipz_queue
*queue
, const u32 nr_of_pages
,
200 const u32 pagesize
, const u32 qe_size
,
/*
 * destructor for a ipz_queue_t
 * see ipz_queue_ctor()
 * returns true if ok, false if queue was NULL-ptr or free failed
 */
int ipz_queue_dtor(struct ipz_queue *queue);
212 * constructor for a ipz_qpt_t,
213 * placement new for struct ipz_queue, new for all dependent datastructors
214 * all QP Tables are the same,
216 * -# allocate+pin queue
218 * -# allocate+pin PTs
219 * -# link PTs to a ring, according to HCA Arch, set bit62 id needed
220 * -# the ring must have room for exactly nr_of_PTEs
223 void ipz_qpt_ctor(struct ipz_qpt
*qpt
,
227 const u8 lowbyte
, const u8 toggle
,
228 u32
* act_nr_of_QEs
, u32
* act_nr_of_pages
);
/*
 * return current Queue Entry, increment Queue Entry iterator by one
 * step in struct ipz_queue, will wrap in ringbuffer
 * returns address (kv) of Queue Entry BEFORE increment
 * warning don't use in parallel with ipz_qpageit_get_inc()
 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
 * fix EQ page problems
 */
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
241 * return current Event Queue Entry, increment Queue Entry iterator
242 * by one step in struct ipz_queue if valid, will wrap in ringbuffer
243 * returns address (kv) of Queue Entry BEFORE increment
244 * returns 0 and does not increment, if wrong valid state
245 * warning don't use in parallel with ipz_queue_QPageit_get_inc()
246 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
248 static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue
*queue
)
250 void *ret
= ipz_qeit_get(queue
);
251 u32 qe
= *(u8
*) ret
;
252 if ((qe
>> 7) != (queue
->toggle_state
& 1))
254 ipz_qeit_eq_get_inc(queue
); /* this is a good one */
258 static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue
*queue
)
260 void *ret
= ipz_qeit_get(queue
);
261 u32 qe
= *(u8
*) ret
;
262 if ((qe
>> 7) != (queue
->toggle_state
& 1))
267 /* returns address (GX) of first queue entry */
268 static inline u64
ipz_qpt_get_firstpage(struct ipz_qpt
*qpt
)
270 return be64_to_cpu(qpt
->qpts
[0]);
273 /* returns address (kv) of first page of queue page table */
274 static inline void *ipz_qpt_get_qpt(struct ipz_qpt
*qpt
)
279 #endif /* __IPZ_PT_FN_H__ */