/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corporation 2002, 2009
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include "zfcp_ext.h"
#include "zfcp_qdio.h"

#define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
	int pos;

	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
		sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
		if (!sbal[pos])
			return -ENOMEM;
	}
	for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
		if (pos % QBUFF_PER_PAGE)
			sbal[pos] = sbal[pos - 1] + 1;

	return 0;
}
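/*
 * Layout note: a struct qdio_buffer holds 16 SBALEs of 16 bytes each, so
 * with 4 KB pages QBUFF_PER_PAGE is 16 and the 128 buffers of one queue fit
 * into 8 pages.  Only sbal[0], sbal[16], ... own a page; the entries in
 * between point into the page of the preceding owner, which is why
 * zfcp_qdio_destroy() below frees pages in steps of QBUFF_PER_PAGE.
 */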
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
}
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}
/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int free, used;

	spin_lock(&qdio->stat_lock);
	now = get_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	free = atomic_read(&qdio->req_q.count);
	used = QDIO_MAX_BUFFERS_PER_Q - free;
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
	spin_unlock(&qdio->stat_lock);
}
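/*
 * The counters updated here feed the request queue utilization statistics
 * that the driver exposes outside this file: req_q_util accumulates "SBALs
 * in use" multiplied by the elapsed time, where the TOD clock delta is
 * shifted right by 12 bits so that the span is roughly in microseconds.
 */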
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int first, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_qdio_queue *queue = &qdio->req_q;

	if (unlikely(qdio_err)) {
		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, count);
		zfcp_qdio_handler_error(qdio, "qdireq1");
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(queue->sbal, first, count);

	zfcp_qdio_account(qdio);
	atomic_add(count, &queue->count);
	wake_up(&qdio->req_q_wq);
}
static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
{
	struct zfcp_qdio_queue *queue = &qdio->resp_q;
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	u8 count, start = queue->first;
	unsigned int retval;

	count = atomic_read(&queue->count) + processed;

	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);

	if (unlikely(retval)) {
		atomic_set(&queue->count, count);
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
	} else {
		queue->first += count;
		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
		atomic_set(&queue->count, 0);
	}
}
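/*
 * do_QDIO() with QDIO_FLAG_SYNC_INPUT hands "count" response buffers,
 * starting at "start", back to the adapter for reuse.  Buffers that could
 * not be returned earlier are kept in queue->count, so a failed put-back is
 * simply retried together with the next batch of processed buffers.
 */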
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int first, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	int sbal_idx, sbal_no;

	if (unlikely(qdio_err)) {
		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, count);
		zfcp_qdio_handler_error(qdio, "qdires1");
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put range of SBALs back to response queue
	 * (including SBALs which have already been freed before)
	 */
	zfcp_qdio_resp_put_back(qdio, count);
}
static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
				 struct zfcp_qdio_req *q_req, int max_sbals)
{
	int count = atomic_read(&qdio->req_q.count);
	count = min(count, max_sbals);
	q_req->sbal_limit = (q_req->sbal_first + count - 1)
			    % QDIO_MAX_BUFFERS_PER_Q;
}
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
		     unsigned long sbtype)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= sbtype;

	return sbale;
}
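/*
 * Chaining summary: once the current SBAL is full, the request spills over
 * into the next SBAL of the request queue, up to the per-request limit set
 * by zfcp_qdio_sbal_limit().  A NULL return tells the caller that no
 * further SBAL may be used for this request.
 */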
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
		     unsigned int sbtype)
{
	if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
		return zfcp_qdio_sbal_chain(qdio, q_req, sbtype);
	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}
static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
				 struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer **sbal = qdio->req_q.sbal;
	int first = q_req->sbal_first;
	int last = q_req->sbal_last;
	int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
		QDIO_MAX_BUFFERS_PER_Q + 1;
	zfcp_qdio_zero_sbals(sbal, first, count);
}
static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
				struct zfcp_qdio_req *q_req,
				unsigned int sbtype, void *start_addr,
				unsigned int total_length)
{
	struct qdio_buffer_element *sbale;
	unsigned long remaining, length;
	void *addr;

	/* split segment up */
	for (addr = start_addr, remaining = total_length; remaining > 0;
	     addr += length, remaining -= length) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_undo_sbals(qdio, q_req);
			return -EINVAL;
		}

		/* new piece must not exceed next page boundary */
		length = min(remaining,
			     (PAGE_SIZE - ((unsigned long)addr &
					   (PAGE_SIZE - 1))));
		sbale->addr = addr;
		sbale->length = length;
	}
	return 0;
}
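/*
 * Each SBALE may only describe memory that does not cross a page boundary,
 * hence the chopping at every 4 KB boundary above.  A worst-case data
 * segment of n bytes can therefore consume up to n / PAGE_SIZE + 1 SBALEs.
 */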
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sbtype: SBALE flags
 * @sg: scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    unsigned long sbtype, struct scatterlist *sg,
			    int max_sbals)
{
	struct qdio_buffer_element *sbale;
	int retval, bytes = 0;

	/* figure out last allowed SBAL */
	zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->flags |= sbtype;

	for (; sg; sg = sg_next(sg)) {
		retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
					      sg_virt(sg), sg->length);
		if (retval < 0)
			return retval;
		bytes += sg->length;
	}

	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	return bytes;
}
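/*
 * Typical use (a sketch only; the real callers live in zfcp_fsf.c and the
 * names of the request object and SBAL limit below are illustrative):
 *
 *	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
 *					SBAL_FLAGS0_TYPE_WRITE,
 *					scsi_sglist(scsi_cmnd), max_sbals);
 *	if (bytes < 0)
 *		return -EIO;	(out of SBALs, the caller backs off)
 */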
/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct zfcp_qdio_queue *req_q = &qdio->req_q;
	int first = q_req->sbal_first;
	int count = q_req->sbal_number;
	int retval;
	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;

	zfcp_qdio_account(qdio);

	retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first,
			 count);
	if (unlikely(retval)) {
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(count, &req_q->count);
	req_q->first += count;
	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
	return 0;
}
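/*
 * Locking note: this function does not serialize access to the request
 * queue itself; the zfcp FSF layer is expected to hold qdio->req_q_lock
 * while it claims SBALs for a request and submits it here, so that the
 * fill-level accounting above stays consistent.
 */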
static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
				      struct zfcp_qdio *qdio)
{
	id->cdev = qdio->adapter->ccw_device;
	id->q_format = QDIO_ZFCP_QFMT;
	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
	ASCEBC(id->adapter_name, 8);
	id->qib_param_field_format = 0;
	id->qib_param_field = NULL;
	id->input_slib_elements = NULL;
	id->output_slib_elements = NULL;
	id->no_input_qs = 1;
	id->no_output_qs = 1;
	id->input_handler = zfcp_qdio_int_resp;
	id->output_handler = zfcp_qdio_int_req;
	id->int_parm = (unsigned long) qdio;
	id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
	id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);
}
/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	struct qdio_initialize init_data;

	if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) ||
	    zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal))
		return -ENOMEM;

	zfcp_qdio_setup_init_data(&init_data, qdio);

	return qdio_allocate(&init_data);
}
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to struct zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_qdio_queue *req_q;
	int first, count;

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	req_q = &qdio->req_q;
	spin_lock_bh(&qdio->req_q_lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
	spin_unlock_bh(&qdio->req_q_lock);

	qdio_shutdown(qdio->adapter->ccw_device,
		      QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&req_q->count);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
	}
	req_q->first = 0;
	atomic_set(&req_q->count, 0);
	qdio->resp_q.first = 0;
	atomic_set(&qdio->resp_q.count, 0);
}
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data;
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	int cc;

	if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	zfcp_qdio_setup_init_data(&init_data, qdio);

	if (qdio_establish(&init_data))
		goto failed_establish;

	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->resp_q.sbal[cc]->element[0]);
		sbale->length = 0;
		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
		sbale->addr = NULL;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
		    QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBAL / number of available SBALs */
	qdio->req_q.first = 0;
	atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **sbal_req, **sbal_resp;
	int p;

	if (!qdio)
		return;

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	sbal_req = qdio->req_q.sbal;
	sbal_resp = qdio->resp_q.sbal;

	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
		free_page((unsigned long) sbal_req[p]);
		free_page((unsigned long) sbal_resp[p]);
	}

	kfree(qdio);
}
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		zfcp_qdio_destroy(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);

	adapter->qdio = qdio;
	return 0;
}