/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   wwang (wei_wang@realsil.com.cn)
 *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"
#include "rtsx_scsi.h"
#include "rtsx_transport.h"
#include "rtsx_chip.h"
#include "rtsx_card.h"
#include "debug.h"

/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/* Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
 * points to a list of s-g entries and we ignore srb->request_bufflen.
 * For non-scatter-gather transfers, srb->request_buffer points to the
 * transfer buffer itself and srb->request_bufflen is the buffer's length.)
 * Update the *index and *offset variables so that the next copy will
 * pick up from where this one left off. */

unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
	unsigned int *offset, enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly.
	 * Make certain it will fit in the available buffer space. */
	if (scsi_sg_count(srb) == 0) {
		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);
		if (dir == TO_XFER_BUF)
			memcpy((unsigned char *) scsi_sglist(srb) + *offset,
					buffer, cnt);
		else
			memcpy(buffer, (unsigned char *) scsi_sglist(srb) +
					*offset, cnt);
		*offset += cnt;

	/* Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.  If the page is already
	 * in kernel-addressable memory then kmap() will return its address.
	 * If the page is not directly accessible -- such as a user buffer
	 * located in high memory -- then kmap() will map it to a temporary
	 * position in the kernel's virtual address space. */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *) scsi_sglist(srb)
				+ *index;

		/* This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop. */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff =
					(sg->offset + *offset) & (PAGE_SIZE-1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {

				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {

				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/* Transfer the data for all the pages in this
			 * s-g entry.  For each page: call kmap(), do the
			 * transfer, and call kunmap() immediately after. */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}

/* Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue. */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

void rtsx_stor_get_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

/***********************************************************************
 * Transport routines
 ***********************************************************************/

/*
 * Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/* if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		RTSX_DEBUGP("-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto Handle_Errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		RTSX_DEBUGP("-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
			(unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
			sizeof(struct sense_data_t));
	}

	return;

	/* Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset.  If that fails, try a class-specific
	 * device reset. */
Handle_Errors:
	return;
}

void rtsx_add_cmd(struct rtsx_chip *chip,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	u32 *cb = (u32 *)(chip->host_cmds_ptr);
	u32 val = 0;
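
	/* Pack one host command word; the field layout is inferred from
	 * the masks and shifts below: bits 31:30 hold the command type,
	 * bits 29:16 the register address, bits 15:8 the mask and
	 * bits 7:0 the data byte. */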
	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4)) {
		cb[(chip->ci)++] = cpu_to_le32(val);
	}
	spin_unlock_irq(&chip->rtsx->reg_lock);
}

void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = 1 << 31;
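
	/* Bit 31 of HCBCTLR appears to be the "start command transfer"
	 * trigger; bits 23:0 carry the command buffer length in bytes
	 * (chip->ci entries of four bytes each). */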
	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = 1 << 31;
	long timeleft;
	int err = 0;
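
	/* Record which card this transfer belongs to, presumably so that
	 * the card-detect path can abort the transfer if that card is
	 * removed while it is in flight. */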
	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		TRACE_GOTO(chip, finish_send_cmd);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	u64 *sgb = (u64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;
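
	/* Each table entry is a little-endian 64-bit descriptor: the DMA
	 * address in the upper 32 bits, the transfer length starting at
	 * bit 12 and the option flags in the low bits (layout inferred
	 * from the packing below).  Segments larger than 0x80000 bytes
	 * are split across several entries, keeping SG_END only on the
	 * final chunk so the hardware sees one contiguous transfer. */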
	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}

		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}

static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg, unsigned int *index,
		unsigned int *offset, int size,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	u32 val = TRIG_DMA;

	if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE) {
		dir = HOST_TO_DEVICE;
	} else if (dma_dir == DMA_FROM_DEVICE) {
		dir = DEVICE_TO_HOST;
	} else {
		return -ENXIO;
	}

	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	resid = size;

	chip->sgi = 0;
	/* Usually the next entry will be @sg@ + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array. So here we use sg_next to move to
	 * the proper sg
	 */
	for (i = 0; i < *index; i++)
		sg = sg_next(sg);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
			     (unsigned int)addr, len);
		RTSX_DEBUGP("*index = %d, *offset = %d\n", *index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}

		if ((i == (sg_cnt - 1)) || !resid) {
			option = SG_VALID | SG_END | SG_TRANS_DATA;
		} else {
			option = SG_VALID | SG_TRANS_DATA;
		}

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg = sg_next(sg);
	}

	RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);
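
	/* The wakeup above may have come from an intermediate interrupt
	 * rather than the final TRANS_OK_INT; if no final result has been
	 * recorded yet, re-arm the completion and wait once more. */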
	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if ((sg == NULL) || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE) {
		dir = HOST_TO_DEVICE;
	} else if (dma_dir == DMA_FROM_DEVICE) {
		dir = DEVICE_TO_HOST;
	} else {
		return -ENXIO;
	}

	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	sg_ptr = sg;
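
	/* The host SG table holds at most HOST_SG_TBL_BUF_LEN / 8 eight-byte
	 * descriptors, so a long scatterlist is programmed and triggered in
	 * chunks of that size, waiting for each chunk to complete before
	 * loading the next one. */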
	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8)) {
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		} else {
			sg_cnt = (HOST_SG_TBL_BUF_LEN / 8);
		}

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
				     (unsigned int)addr, len);

			if (j == (sg_cnt - 1)) {
				option = SG_VALID | SG_END | SG_TRANS_DATA;
			} else {
				option = SG_VALID | SG_TRANS_DATA;
			}

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);

		/* sg_ptr was already advanced entry by entry via sg_next()
		 * in the loop above, so it now points at the first entry of
		 * the next chunk; advancing it again here would skip
		 * entries. */
	}
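
	/* All chunks have been triggered.  As above, the last wakeup may
	 * have been an intermediate one, so if no final result has been
	 * recorded yet, re-arm the completion and wait for the controller
	 * to raise the final TRANS_OK_INT. */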
	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = (1 << 31);
	long timeleft;

	if ((buf == NULL) || (len <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE) {
		dir = HOST_TO_DEVICE;
	} else if (dma_dir == DMA_FROM_DEVICE) {
		dir = DEVICE_TO_HOST;
	} else {
		return -ENXIO;
	}

	addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir);
	if (!addr)
		return -ENOMEM;

	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}
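
	/* Single-buffer mode: bit 31 of HDBCTLR appears to trigger the
	 * transfer, bit 29 selects the direction and bits 23:0 give the
	 * transfer length in bytes. */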
	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
		void *buf, size_t len, int use_sg, unsigned int *index,
		unsigned int *offset, enum dma_data_direction dma_dir,
		int timeout)
{
	int err = 0;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;
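
	/* When use_sg is non-zero, buf actually points to a scatterlist
	 * and use_sg is the number of entries in it; otherwise buf is a
	 * plain kernel buffer of len bytes. */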
	if (use_sg) {
		err = rtsx_transfer_sglist_adma_partial(chip, card,
				(struct scatterlist *)buf, use_sg,
				index, offset, (int)len, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card,
				buf, len, dma_dir, timeout);
	}
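
	/* A DELINK event (the bridge apparently having dropped its link)
	 * invalidates any cached card state, so mark every slot for
	 * re-initialization before returning the error. */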
	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}

int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	RTSX_DEBUGP("use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma(chip, card,
				(struct scatterlist *)buf,
				use_sg, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}