/*
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
25 #include <linux/completion.h>
26 #include <linux/slab.h>
28 #include "rsxx_priv.h"
30 #define CREG_TIMEOUT_MSEC 10000
32 typedef void (*creg_cmd_cb
)(struct rsxx_cardinfo
*card
,
37 struct list_head list
;
48 static struct kmem_cache
*creg_cmd_pool
;
51 /*------------ Private Functions --------------*/
53 #if defined(__LITTLE_ENDIAN)
54 #define LITTLE_ENDIAN 1
55 #elif defined(__BIG_ENDIAN)
56 #define LITTLE_ENDIAN 0
58 #error Unknown endianess!!! Aborting...
61 static int copy_to_creg_data(struct rsxx_cardinfo
*card
,
69 if (unlikely(card
->eeh_state
))
72 for (i
= 0; cnt8
> 0; i
++, cnt8
-= 4) {
74 * Firmware implementation makes it necessary to byte swap on
75 * little endian processors.
77 if (LITTLE_ENDIAN
&& stream
)
78 iowrite32be(data
[i
], card
->regmap
+ CREG_DATA(i
));
80 iowrite32(data
[i
], card
->regmap
+ CREG_DATA(i
));
87 static int copy_from_creg_data(struct rsxx_cardinfo
*card
,
95 if (unlikely(card
->eeh_state
))
98 for (i
= 0; cnt8
> 0; i
++, cnt8
-= 4) {
100 * Firmware implementation makes it necessary to byte swap on
101 * little endian processors.
103 if (LITTLE_ENDIAN
&& stream
)
104 data
[i
] = ioread32be(card
->regmap
+ CREG_DATA(i
));
106 data
[i
] = ioread32(card
->regmap
+ CREG_DATA(i
));
112 static void creg_issue_cmd(struct rsxx_cardinfo
*card
, struct creg_cmd
*cmd
)
116 if (unlikely(card
->eeh_state
))
119 iowrite32(cmd
->addr
, card
->regmap
+ CREG_ADD
);
120 iowrite32(cmd
->cnt8
, card
->regmap
+ CREG_CNT
);
122 if (cmd
->op
== CREG_OP_WRITE
) {
124 st
= copy_to_creg_data(card
, cmd
->cnt8
,
125 cmd
->buf
, cmd
->stream
);
131 if (unlikely(card
->eeh_state
))
134 /* Setting the valid bit will kick off the command. */
135 iowrite32(cmd
->op
, card
->regmap
+ CREG_CMD
);
138 static void creg_kick_queue(struct rsxx_cardinfo
*card
)
140 if (card
->creg_ctrl
.active
|| list_empty(&card
->creg_ctrl
.queue
))
143 card
->creg_ctrl
.active
= 1;
144 card
->creg_ctrl
.active_cmd
= list_first_entry(&card
->creg_ctrl
.queue
,
145 struct creg_cmd
, list
);
146 list_del(&card
->creg_ctrl
.active_cmd
->list
);
147 card
->creg_ctrl
.q_depth
--;
150 * We have to set the timer before we push the new command. Otherwise,
151 * we could create a race condition that would occur if the timer
152 * was not canceled, and expired after the new command was pushed,
153 * but before the command was issued to hardware.
155 mod_timer(&card
->creg_ctrl
.cmd_timer
,
156 jiffies
+ msecs_to_jiffies(CREG_TIMEOUT_MSEC
));
158 creg_issue_cmd(card
, card
->creg_ctrl
.active_cmd
);
161 static int creg_queue_cmd(struct rsxx_cardinfo
*card
,
167 creg_cmd_cb callback
,
170 struct creg_cmd
*cmd
;
172 /* Don't queue stuff up if we're halted. */
173 if (unlikely(card
->halt
))
176 if (card
->creg_ctrl
.reset
)
179 if (cnt8
> MAX_CREG_DATA8
)
182 cmd
= kmem_cache_alloc(creg_cmd_pool
, GFP_KERNEL
);
186 INIT_LIST_HEAD(&cmd
->list
);
192 cmd
->stream
= stream
;
194 cmd
->cb_private
= cb_private
;
197 spin_lock_bh(&card
->creg_ctrl
.lock
);
198 list_add_tail(&cmd
->list
, &card
->creg_ctrl
.queue
);
199 card
->creg_ctrl
.q_depth
++;
200 creg_kick_queue(card
);
201 spin_unlock_bh(&card
->creg_ctrl
.lock
);
206 static void creg_cmd_timed_out(unsigned long data
)
208 struct rsxx_cardinfo
*card
= (struct rsxx_cardinfo
*) data
;
209 struct creg_cmd
*cmd
;
211 spin_lock(&card
->creg_ctrl
.lock
);
212 cmd
= card
->creg_ctrl
.active_cmd
;
213 card
->creg_ctrl
.active_cmd
= NULL
;
214 spin_unlock(&card
->creg_ctrl
.lock
);
217 card
->creg_ctrl
.creg_stats
.creg_timeout
++;
218 dev_warn(CARD_TO_DEV(card
),
219 "No active command associated with timeout!\n");
224 cmd
->cb(card
, cmd
, -ETIMEDOUT
);
226 kmem_cache_free(creg_cmd_pool
, cmd
);
229 spin_lock(&card
->creg_ctrl
.lock
);
230 card
->creg_ctrl
.active
= 0;
231 creg_kick_queue(card
);
232 spin_unlock(&card
->creg_ctrl
.lock
);
236 static void creg_cmd_done(struct work_struct
*work
)
238 struct rsxx_cardinfo
*card
;
239 struct creg_cmd
*cmd
;
242 card
= container_of(work
, struct rsxx_cardinfo
,
243 creg_ctrl
.done_work
);
246 * The timer could not be cancelled for some reason,
247 * race to pop the active command.
249 if (del_timer_sync(&card
->creg_ctrl
.cmd_timer
) == 0)
250 card
->creg_ctrl
.creg_stats
.failed_cancel_timer
++;
252 spin_lock_bh(&card
->creg_ctrl
.lock
);
253 cmd
= card
->creg_ctrl
.active_cmd
;
254 card
->creg_ctrl
.active_cmd
= NULL
;
255 spin_unlock_bh(&card
->creg_ctrl
.lock
);
258 dev_err(CARD_TO_DEV(card
),
259 "Spurious creg interrupt!\n");
263 card
->creg_ctrl
.creg_stats
.stat
= ioread32(card
->regmap
+ CREG_STAT
);
264 cmd
->status
= card
->creg_ctrl
.creg_stats
.stat
;
265 if ((cmd
->status
& CREG_STAT_STATUS_MASK
) == 0) {
266 dev_err(CARD_TO_DEV(card
),
267 "Invalid status on creg command\n");
269 * At this point we're probably reading garbage from HW. Don't
270 * do anything else that could mess up the system and let
271 * the sync function return an error.
275 } else if (cmd
->status
& CREG_STAT_ERROR
) {
279 if ((cmd
->op
== CREG_OP_READ
)) {
280 unsigned int cnt8
= ioread32(card
->regmap
+ CREG_CNT
);
282 /* Paranoid Sanity Checks */
284 dev_err(CARD_TO_DEV(card
),
285 "Buffer not given for read.\n");
289 if (cnt8
!= cmd
->cnt8
) {
290 dev_err(CARD_TO_DEV(card
),
296 st
= copy_from_creg_data(card
, cnt8
, cmd
->buf
, cmd
->stream
);
301 cmd
->cb(card
, cmd
, st
);
303 kmem_cache_free(creg_cmd_pool
, cmd
);
305 spin_lock_bh(&card
->creg_ctrl
.lock
);
306 card
->creg_ctrl
.active
= 0;
307 creg_kick_queue(card
);
308 spin_unlock_bh(&card
->creg_ctrl
.lock
);
311 static void creg_reset(struct rsxx_cardinfo
*card
)
313 struct creg_cmd
*cmd
= NULL
;
314 struct creg_cmd
*tmp
;
318 * mutex_trylock is used here because if reset_lock is taken then a
319 * reset is already happening. So, we can just go ahead and return.
321 if (!mutex_trylock(&card
->creg_ctrl
.reset_lock
))
324 card
->creg_ctrl
.reset
= 1;
325 spin_lock_irqsave(&card
->irq_lock
, flags
);
326 rsxx_disable_ier_and_isr(card
, CR_INTR_CREG
| CR_INTR_EVENT
);
327 spin_unlock_irqrestore(&card
->irq_lock
, flags
);
329 dev_warn(CARD_TO_DEV(card
),
330 "Resetting creg interface for recovery\n");
332 /* Cancel outstanding commands */
333 spin_lock_bh(&card
->creg_ctrl
.lock
);
334 list_for_each_entry_safe(cmd
, tmp
, &card
->creg_ctrl
.queue
, list
) {
335 list_del(&cmd
->list
);
336 card
->creg_ctrl
.q_depth
--;
338 cmd
->cb(card
, cmd
, -ECANCELED
);
339 kmem_cache_free(creg_cmd_pool
, cmd
);
342 cmd
= card
->creg_ctrl
.active_cmd
;
343 card
->creg_ctrl
.active_cmd
= NULL
;
345 if (timer_pending(&card
->creg_ctrl
.cmd_timer
))
346 del_timer_sync(&card
->creg_ctrl
.cmd_timer
);
349 cmd
->cb(card
, cmd
, -ECANCELED
);
350 kmem_cache_free(creg_cmd_pool
, cmd
);
352 card
->creg_ctrl
.active
= 0;
354 spin_unlock_bh(&card
->creg_ctrl
.lock
);
356 card
->creg_ctrl
.reset
= 0;
357 spin_lock_irqsave(&card
->irq_lock
, flags
);
358 rsxx_enable_ier_and_isr(card
, CR_INTR_CREG
| CR_INTR_EVENT
);
359 spin_unlock_irqrestore(&card
->irq_lock
, flags
);
361 mutex_unlock(&card
->creg_ctrl
.reset_lock
);
364 /* Used for synchronous accesses */
365 struct creg_completion
{
366 struct completion
*cmd_done
;
371 static void creg_cmd_done_cb(struct rsxx_cardinfo
*card
,
372 struct creg_cmd
*cmd
,
375 struct creg_completion
*cmd_completion
;
377 cmd_completion
= cmd
->cb_private
;
378 BUG_ON(!cmd_completion
);
380 cmd_completion
->st
= st
;
381 cmd_completion
->creg_status
= cmd
->status
;
382 complete(cmd_completion
->cmd_done
);
385 static int __issue_creg_rw(struct rsxx_cardinfo
*card
,
391 unsigned int *hw_stat
)
393 DECLARE_COMPLETION_ONSTACK(cmd_done
);
394 struct creg_completion completion
;
395 unsigned long timeout
;
398 completion
.cmd_done
= &cmd_done
;
400 completion
.creg_status
= 0;
402 st
= creg_queue_cmd(card
, op
, addr
, cnt8
, buf
, stream
, creg_cmd_done_cb
,
408 * This timeout is necessary for unresponsive hardware. The additional
409 * 20 seconds to used to guarantee that each cregs requests has time to
412 timeout
= msecs_to_jiffies(CREG_TIMEOUT_MSEC
*
413 card
->creg_ctrl
.q_depth
+ 20000);
416 * The creg interface is guaranteed to complete. It has a timeout
417 * mechanism that will kick in if hardware does not respond.
419 st
= wait_for_completion_timeout(completion
.cmd_done
, timeout
);
422 * This is really bad, because the kernel timer did not
423 * expire and notify us of a timeout!
425 dev_crit(CARD_TO_DEV(card
),
426 "cregs timer failed\n");
431 *hw_stat
= completion
.creg_status
;
434 dev_warn(CARD_TO_DEV(card
),
435 "creg command failed(%d x%08x)\n",
436 completion
.st
, addr
);
437 return completion
.st
;
443 static int issue_creg_rw(struct rsxx_cardinfo
*card
,
450 unsigned int hw_stat
;
455 op
= read
? CREG_OP_READ
: CREG_OP_WRITE
;
458 xfer
= min_t(unsigned int, size8
, MAX_CREG_DATA8
);
460 st
= __issue_creg_rw(card
, op
, addr
, xfer
,
461 data
, stream
, &hw_stat
);
465 data
= (char *)data
+ xfer
;
473 /* ---------------------------- Public API ---------------------------------- */
474 int rsxx_creg_write(struct rsxx_cardinfo
*card
,
480 return issue_creg_rw(card
, addr
, size8
, data
, byte_stream
, 0);
483 int rsxx_creg_read(struct rsxx_cardinfo
*card
,
489 return issue_creg_rw(card
, addr
, size8
, data
, byte_stream
, 1);
492 int rsxx_get_card_state(struct rsxx_cardinfo
*card
, unsigned int *state
)
494 return rsxx_creg_read(card
, CREG_ADD_CARD_STATE
,
495 sizeof(*state
), state
, 0);
498 int rsxx_get_card_size8(struct rsxx_cardinfo
*card
, u64
*size8
)
503 st
= rsxx_creg_read(card
, CREG_ADD_CARD_SIZE
,
504 sizeof(size
), &size
, 0);
508 *size8
= (u64
)size
* RSXX_HW_BLK_SIZE
;
512 int rsxx_get_num_targets(struct rsxx_cardinfo
*card
,
513 unsigned int *n_targets
)
515 return rsxx_creg_read(card
, CREG_ADD_NUM_TARGETS
,
516 sizeof(*n_targets
), n_targets
, 0);
519 int rsxx_get_card_capabilities(struct rsxx_cardinfo
*card
,
522 return rsxx_creg_read(card
, CREG_ADD_CAPABILITIES
,
523 sizeof(*capabilities
), capabilities
, 0);
526 int rsxx_issue_card_cmd(struct rsxx_cardinfo
*card
, u32 cmd
)
528 return rsxx_creg_write(card
, CREG_ADD_CARD_CMD
,
529 sizeof(cmd
), &cmd
, 0);
533 /*----------------- HW Log Functions -------------------*/
/*
 * Emit one hardware log fragment at the appropriate kernel log level.
 * The level is sticky (static) so continuation fragments of a long
 * message reuse the level of the message they belong to.
 */
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level
	 */
	if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
		level = str[1];
		str += 3; /* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	default:
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}
/*
 * The substrncpy function copies the src string (which includes the
 * terminating '\0' character), up to the count into the dest pointer.
 * Returns the number of bytes copied to dest.
 *
 * Note: like strncpy, dest is NOT NUL-terminated if src is longer
 * than count; callers must bound their reads accordingly.
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int max_cnt = count;

	while (count) {
		count--;
		*dest = *src;
		if (*dest == '\0')
			break;
		src++;
		dest++;
	}
	return max_cnt - count;
}
600 static void read_hw_log_done(struct rsxx_cardinfo
*card
,
601 struct creg_cmd
*cmd
,
613 /* Failed getting the log message */
617 while (off
< cmd
->cnt8
) {
618 log_str
= &card
->log
.buf
[card
->log
.buf_len
];
619 cnt
= min(cmd
->cnt8
- off
, LOG_BUF_SIZE8
- card
->log
.buf_len
);
620 len
= substrncpy(log_str
, &buf
[off
], cnt
);
623 card
->log
.buf_len
+= len
;
626 * Flush the log if we've hit the end of a message or if we've
627 * run out of buffer space.
629 if ((log_str
[len
- 1] == '\0') ||
630 (card
->log
.buf_len
== LOG_BUF_SIZE8
)) {
631 if (card
->log
.buf_len
!= 1) /* Don't log blank lines. */
632 hw_log_msg(card
, card
->log
.buf
,
634 card
->log
.buf_len
= 0;
639 if (cmd
->status
& CREG_STAT_LOG_PENDING
)
640 rsxx_read_hw_log(card
);
643 int rsxx_read_hw_log(struct rsxx_cardinfo
*card
)
647 st
= creg_queue_cmd(card
, CREG_OP_READ
, CREG_ADD_LOG
,
648 sizeof(card
->log
.tmp
), card
->log
.tmp
,
649 1, read_hw_log_done
, NULL
);
651 dev_err(CARD_TO_DEV(card
),
652 "Failed getting log text\n");
657 /*-------------- IOCTL REG Access ------------------*/
658 static int issue_reg_cmd(struct rsxx_cardinfo
*card
,
659 struct rsxx_reg_access
*cmd
,
662 unsigned int op
= read
? CREG_OP_READ
: CREG_OP_WRITE
;
664 return __issue_creg_rw(card
, op
, cmd
->addr
, cmd
->cnt
, cmd
->data
,
665 cmd
->stream
, &cmd
->stat
);
668 int rsxx_reg_access(struct rsxx_cardinfo
*card
,
669 struct rsxx_reg_access __user
*ucmd
,
672 struct rsxx_reg_access cmd
;
675 st
= copy_from_user(&cmd
, ucmd
, sizeof(cmd
));
679 if (cmd
.cnt
> RSXX_MAX_REG_CNT
)
682 st
= issue_reg_cmd(card
, &cmd
, read
);
686 st
= put_user(cmd
.stat
, &ucmd
->stat
);
691 st
= copy_to_user(ucmd
->data
, cmd
.data
, cmd
.cnt
);
699 void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo
*card
)
701 struct creg_cmd
*cmd
= NULL
;
703 cmd
= card
->creg_ctrl
.active_cmd
;
704 card
->creg_ctrl
.active_cmd
= NULL
;
707 del_timer_sync(&card
->creg_ctrl
.cmd_timer
);
709 spin_lock_bh(&card
->creg_ctrl
.lock
);
710 list_add(&cmd
->list
, &card
->creg_ctrl
.queue
);
711 card
->creg_ctrl
.q_depth
++;
712 card
->creg_ctrl
.active
= 0;
713 spin_unlock_bh(&card
->creg_ctrl
.lock
);
717 void rsxx_kick_creg_queue(struct rsxx_cardinfo
*card
)
719 spin_lock_bh(&card
->creg_ctrl
.lock
);
720 if (!list_empty(&card
->creg_ctrl
.queue
))
721 creg_kick_queue(card
);
722 spin_unlock_bh(&card
->creg_ctrl
.lock
);
725 /*------------ Initialization & Setup --------------*/
726 int rsxx_creg_setup(struct rsxx_cardinfo
*card
)
728 card
->creg_ctrl
.active_cmd
= NULL
;
730 INIT_WORK(&card
->creg_ctrl
.done_work
, creg_cmd_done
);
731 mutex_init(&card
->creg_ctrl
.reset_lock
);
732 INIT_LIST_HEAD(&card
->creg_ctrl
.queue
);
733 spin_lock_init(&card
->creg_ctrl
.lock
);
734 setup_timer(&card
->creg_ctrl
.cmd_timer
, creg_cmd_timed_out
,
735 (unsigned long) card
);
740 void rsxx_creg_destroy(struct rsxx_cardinfo
*card
)
742 struct creg_cmd
*cmd
;
743 struct creg_cmd
*tmp
;
746 /* Cancel outstanding commands */
747 spin_lock_bh(&card
->creg_ctrl
.lock
);
748 list_for_each_entry_safe(cmd
, tmp
, &card
->creg_ctrl
.queue
, list
) {
749 list_del(&cmd
->list
);
751 cmd
->cb(card
, cmd
, -ECANCELED
);
752 kmem_cache_free(creg_cmd_pool
, cmd
);
757 dev_info(CARD_TO_DEV(card
),
758 "Canceled %d queue creg commands\n", cnt
);
760 cmd
= card
->creg_ctrl
.active_cmd
;
761 card
->creg_ctrl
.active_cmd
= NULL
;
763 if (timer_pending(&card
->creg_ctrl
.cmd_timer
))
764 del_timer_sync(&card
->creg_ctrl
.cmd_timer
);
767 cmd
->cb(card
, cmd
, -ECANCELED
);
768 dev_info(CARD_TO_DEV(card
),
769 "Canceled active creg command\n");
770 kmem_cache_free(creg_cmd_pool
, cmd
);
772 spin_unlock_bh(&card
->creg_ctrl
.lock
);
774 cancel_work_sync(&card
->creg_ctrl
.done_work
);
778 int rsxx_creg_init(void)
780 creg_cmd_pool
= KMEM_CACHE(creg_cmd
, SLAB_HWCACHE_ALIGN
);
787 void rsxx_creg_cleanup(void)
789 kmem_cache_destroy(creg_cmd_pool
);