/*
 * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
 *
 * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
 *
 * Licensed under GPL version 2 only.
 */
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/uio_driver.h>
13 #include <linux/spinlock.h>
14 #include <linux/cdev.h>
15 #include <linux/delay.h>
16 #include <linux/sysfs.h>
17 #include <linux/poll.h>
18 #include <linux/idr.h>
19 #include <linux/interrupt.h>
20 #include <linux/init.h>
21 #include <linux/ioctl.h>
26 #include <asm/cacheflush.h>
/* Fallback IDs: restore the #endif terminators that close each
 * conditional — without them the #ifndef blocks swallow the rest of
 * the file and it cannot preprocess.
 */
#ifndef PCI_VENDOR_ID_RRAPIDS
#define PCI_VENDOR_ID_RRAPIDS 0x17D2
#endif

#ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
#define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
#endif

#define POCH_NCHANNELS 2

#define MAX_POCH_CARDS 8
#define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)

#define DRV_NAME "poch"
#define PFX DRV_NAME ": "
45 * BAR0 Bridge Register Definitions
48 #define BRIDGE_REV_REG 0x0
49 #define BRIDGE_INT_MASK_REG 0x4
50 #define BRIDGE_INT_STAT_REG 0x8
52 #define BRIDGE_INT_ACTIVE (0x1 << 31)
53 #define BRIDGE_INT_FPGA (0x1 << 2)
54 #define BRIDGE_INT_TEMP_FAIL (0x1 << 1)
55 #define BRIDGE_INT_TEMP_WARN (0x1 << 0)
57 #define BRIDGE_FPGA_RESET_REG 0xC
59 #define BRIDGE_CARD_POWER_REG 0x10
60 #define BRIDGE_CARD_POWER_EN (0x1 << 0)
61 #define BRIDGE_CARD_POWER_PROG_DONE (0x1 << 31)
63 #define BRIDGE_JTAG_REG 0x14
64 #define BRIDGE_DMA_GO_REG 0x18
65 #define BRIDGE_STAT_0_REG 0x1C
66 #define BRIDGE_STAT_1_REG 0x20
67 #define BRIDGE_STAT_2_REG 0x24
68 #define BRIDGE_STAT_3_REG 0x28
69 #define BRIDGE_TEMP_STAT_REG 0x2C
70 #define BRIDGE_TEMP_THRESH_REG 0x30
71 #define BRIDGE_EEPROM_REVSEL_REG 0x34
72 #define BRIDGE_CIS_STRUCT_REG 0x100
73 #define BRIDGE_BOARDREV_REG 0x124
76 * BAR1 FPGA Register Definitions
79 #define FPGA_IFACE_REV_REG 0x0
80 #define FPGA_RX_BLOCK_SIZE_REG 0x8
81 #define FPGA_TX_BLOCK_SIZE_REG 0xC
82 #define FPGA_RX_BLOCK_COUNT_REG 0x10
83 #define FPGA_TX_BLOCK_COUNT_REG 0x14
84 #define FPGA_RX_CURR_DMA_BLOCK_REG 0x18
85 #define FPGA_TX_CURR_DMA_BLOCK_REG 0x1C
86 #define FPGA_RX_GROUP_COUNT_REG 0x20
87 #define FPGA_TX_GROUP_COUNT_REG 0x24
88 #define FPGA_RX_CURR_GROUP_REG 0x28
89 #define FPGA_TX_CURR_GROUP_REG 0x2C
90 #define FPGA_RX_CURR_PCI_REG 0x38
91 #define FPGA_TX_CURR_PCI_REG 0x3C
92 #define FPGA_RX_GROUP0_START_REG 0x40
93 #define FPGA_TX_GROUP0_START_REG 0xC0
94 #define FPGA_DMA_DESC_1_REG 0x140
95 #define FPGA_DMA_DESC_2_REG 0x144
96 #define FPGA_DMA_DESC_3_REG 0x148
97 #define FPGA_DMA_DESC_4_REG 0x14C
99 #define FPGA_DMA_INT_STAT_REG 0x150
100 #define FPGA_DMA_INT_MASK_REG 0x154
101 #define FPGA_DMA_INT_RX (1 << 0)
102 #define FPGA_DMA_INT_TX (1 << 1)
104 #define FPGA_RX_GROUPS_PER_INT_REG 0x158
105 #define FPGA_TX_GROUPS_PER_INT_REG 0x15C
106 #define FPGA_DMA_ADR_PAGE_REG 0x160
107 #define FPGA_FPGA_REV_REG 0x200
109 #define FPGA_ADC_CLOCK_CTL_REG 0x204
110 #define FPGA_ADC_CLOCK_CTL_OSC_EN (0x1 << 3)
111 #define FPGA_ADC_CLOCK_LOCAL_CLK (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
112 #define FPGA_ADC_CLOCK_EXT_SAMP_CLK 0X0
114 #define FPGA_ADC_DAC_EN_REG 0x208
115 #define FPGA_ADC_DAC_EN_DAC_OFF (0x1 << 1)
116 #define FPGA_ADC_DAC_EN_ADC_OFF (0x1 << 0)
118 #define FPGA_INT_STAT_REG 0x20C
119 #define FPGA_INT_MASK_REG 0x210
120 #define FPGA_INT_PLL_UNLOCKED (0x1 << 9)
121 #define FPGA_INT_DMA_CORE (0x1 << 8)
122 #define FPGA_INT_TX_FF_EMPTY (0x1 << 7)
123 #define FPGA_INT_RX_FF_EMPTY (0x1 << 6)
124 #define FPGA_INT_TX_FF_OVRFLW (0x1 << 3)
125 #define FPGA_INT_RX_FF_OVRFLW (0x1 << 2)
126 #define FPGA_INT_TX_ACQ_DONE (0x1 << 1)
127 #define FPGA_INT_RX_ACQ_DONE (0x1)
129 #define FPGA_RX_ADC_CTL_REG 0x214
130 #define FPGA_RX_ADC_CTL_CONT_CAP (0x0)
131 #define FPGA_RX_ADC_CTL_SNAP_CAP (0x1)
133 #define FPGA_RX_ARM_REG 0x21C
135 #define FPGA_DOM_REG 0x224
136 #define FPGA_DOM_DCM_RESET (0x1 << 5)
137 #define FPGA_DOM_SOFT_RESET (0x1 << 4)
138 #define FPGA_DOM_DUAL_M_SG_DMA (0x0)
139 #define FPGA_DOM_TARGET_ACCESS (0x1)
141 #define FPGA_TX_CTL_REG 0x228
142 #define FPGA_TX_CTL_FIFO_FLUSH (0x1 << 9)
143 #define FPGA_TX_CTL_OUTPUT_ZERO (0x0 << 2)
144 #define FPGA_TX_CTL_OUTPUT_CARDBUS (0x1 << 2)
145 #define FPGA_TX_CTL_OUTPUT_ADC (0x2 << 2)
146 #define FPGA_TX_CTL_OUTPUT_SNAPSHOT (0x3 << 2)
147 #define FPGA_TX_CTL_LOOPBACK (0x1 << 0)
149 #define FPGA_ENDIAN_MODE_REG 0x22C
150 #define FPGA_RX_FIFO_COUNT_REG 0x28C
151 #define FPGA_TX_ENABLE_REG 0x298
152 #define FPGA_TX_TRIGGER_REG 0x29C
153 #define FPGA_TX_DATAMEM_COUNT_REG 0x2A8
154 #define FPGA_CAP_FIFO_REG 0x300
155 #define FPGA_TX_SNAPSHOT_REG 0x8000
158 * Channel Index Definitions
173 struct poch_group_info
{
176 unsigned long user_offset
;
179 struct channel_info
{
182 atomic_t sys_block_size
;
183 atomic_t sys_group_size
;
184 atomic_t sys_group_count
;
186 enum channel_dir dir
;
188 unsigned long block_size
;
189 unsigned long group_size
;
190 unsigned long group_count
;
192 /* Contains the DMA address and VM offset of each group. */
193 struct poch_group_info
*groups
;
195 /* Contains the header and circular buffer exported to userspace. */
196 spinlock_t group_offsets_lock
;
197 struct poch_cbuf_header
*header
;
198 struct page
*header_pg
;
199 unsigned long header_size
;
201 /* Last group indicated as 'complete' to user space. */
202 unsigned int transfer
;
204 wait_queue_head_t wq
;
207 unsigned int data_available
;
208 unsigned int space_available
;
211 void __iomem
*bridge_iomem
;
212 void __iomem
*fpga_iomem
;
213 spinlock_t
*iomem_lock
;
219 struct poch_counters counters
;
220 spinlock_t counters_lock
;
227 struct pci_dev
*pci_dev
;
228 unsigned int nchannels
;
229 struct channel_info channels
[POCH_NCHANNELS
];
232 /* Counts the no. of channels that have been opened. On first
233 * open, the card is powered on. On last channel close, the
234 * card is powered off.
238 void __iomem
*bridge_iomem
;
239 void __iomem
*fpga_iomem
;
240 spinlock_t iomem_lock
;
245 static dev_t poch_first_dev
;
246 static struct class *poch_cls
;
247 static DEFINE_IDR(poch_ids
);
249 static ssize_t
store_block_size(struct device
*dev
,
250 struct device_attribute
*attr
,
251 const char *buf
, size_t count
)
253 struct channel_info
*channel
= dev_get_drvdata(dev
);
254 unsigned long block_size
;
256 sscanf(buf
, "%lu", &block_size
);
257 atomic_set(&channel
->sys_block_size
, block_size
);
261 static DEVICE_ATTR(block_size
, S_IWUSR
|S_IWGRP
, NULL
, store_block_size
);
263 static ssize_t
store_group_size(struct device
*dev
,
264 struct device_attribute
*attr
,
265 const char *buf
, size_t count
)
267 struct channel_info
*channel
= dev_get_drvdata(dev
);
268 unsigned long group_size
;
270 sscanf(buf
, "%lu", &group_size
);
271 atomic_set(&channel
->sys_group_size
, group_size
);
275 static DEVICE_ATTR(group_size
, S_IWUSR
|S_IWGRP
, NULL
, store_group_size
);
277 static ssize_t
store_group_count(struct device
*dev
,
278 struct device_attribute
*attr
,
279 const char *buf
, size_t count
)
281 struct channel_info
*channel
= dev_get_drvdata(dev
);
282 unsigned long group_count
;
284 sscanf(buf
, "%lu", &group_count
);
285 atomic_set(&channel
->sys_group_count
, group_count
);
289 static DEVICE_ATTR(group_count
, S_IWUSR
|S_IWGRP
, NULL
, store_group_count
);
291 static ssize_t
show_direction(struct device
*dev
,
292 struct device_attribute
*attr
, char *buf
)
294 struct channel_info
*channel
= dev_get_drvdata(dev
);
297 len
= sprintf(buf
, "%s\n", (channel
->dir
? "tx" : "rx"));
300 static DEVICE_ATTR(dir
, S_IRUSR
|S_IRGRP
, show_direction
, NULL
);
302 static ssize_t
show_mmap_size(struct device
*dev
,
303 struct device_attribute
*attr
, char *buf
)
305 struct channel_info
*channel
= dev_get_drvdata(dev
);
307 unsigned long mmap_size
;
308 unsigned long group_pages
;
309 unsigned long header_pages
;
310 unsigned long total_group_pages
;
312 /* FIXME: We do not have to add 1, if group_size a multiple of
314 group_pages
= (channel
->group_size
/ PAGE_SIZE
) + 1;
315 header_pages
= (channel
->header_size
/ PAGE_SIZE
) + 1;
316 total_group_pages
= group_pages
* channel
->group_count
;
318 mmap_size
= (header_pages
+ total_group_pages
) * PAGE_SIZE
;
319 len
= sprintf(buf
, "%lu\n", mmap_size
);
322 static DEVICE_ATTR(mmap_size
, S_IRUSR
|S_IRGRP
, show_mmap_size
, NULL
);
324 static struct device_attribute
*poch_class_attrs
[] = {
325 &dev_attr_block_size
,
326 &dev_attr_group_size
,
327 &dev_attr_group_count
,
332 static void poch_channel_free_groups(struct channel_info
*channel
)
336 for (i
= 0; i
< channel
->group_count
; i
++) {
337 struct poch_group_info
*group
;
340 group
= &channel
->groups
[i
];
341 order
= get_order(channel
->group_size
);
343 __free_pages(group
->pg
, order
);
347 static int poch_channel_alloc_groups(struct channel_info
*channel
)
350 unsigned long group_pages
;
351 unsigned long header_pages
;
353 group_pages
= (channel
->group_size
/ PAGE_SIZE
) + 1;
354 header_pages
= (channel
->header_size
/ PAGE_SIZE
) + 1;
356 for (i
= 0; i
< channel
->group_count
; i
++) {
357 struct poch_group_info
*group
;
361 group
= &channel
->groups
[i
];
362 order
= get_order(channel
->group_size
);
365 * __GFP_COMP is required here since we are going to
366 * perform non-linear mapping to userspace. For more
367 * information read the vm_insert_page() function
371 gfp_mask
= GFP_KERNEL
| GFP_DMA32
| __GFP_ZERO
;
372 group
->pg
= alloc_pages(gfp_mask
, order
);
374 poch_channel_free_groups(channel
);
378 /* FIXME: This is the physical address not the bus
379 * address! This won't work in architectures that
380 * have an IOMMU. Can we use pci_map_single() for
383 group
->dma_addr
= page_to_pfn(group
->pg
) * PAGE_SIZE
;
385 (header_pages
+ (i
* group_pages
)) * PAGE_SIZE
;
387 printk(KERN_INFO PFX
"%ld: user_offset: 0x%lx dma: 0x%x\n", i
,
388 group
->user_offset
, group
->dma_addr
);
394 static void channel_latch_attr(struct channel_info
*channel
)
396 channel
->group_count
= atomic_read(&channel
->sys_group_count
);
397 channel
->group_size
= atomic_read(&channel
->sys_group_size
);
398 channel
->block_size
= atomic_read(&channel
->sys_block_size
);
402 * Configure DMA group registers
404 static void channel_dma_init(struct channel_info
*channel
)
406 void __iomem
*fpga
= channel
->fpga_iomem
;
410 unsigned int group_in_page
;
415 u32 groups_per_int_reg
;
418 if (channel
->chno
== CHNO_RX_CHANNEL
) {
419 group_regs_base
= FPGA_RX_GROUP0_START_REG
;
420 block_size_reg
= FPGA_RX_BLOCK_SIZE_REG
;
421 block_count_reg
= FPGA_RX_BLOCK_COUNT_REG
;
422 group_count_reg
= FPGA_RX_GROUP_COUNT_REG
;
423 groups_per_int_reg
= FPGA_RX_GROUPS_PER_INT_REG
;
424 curr_pci_reg
= FPGA_RX_CURR_PCI_REG
;
426 group_regs_base
= FPGA_TX_GROUP0_START_REG
;
427 block_size_reg
= FPGA_TX_BLOCK_SIZE_REG
;
428 block_count_reg
= FPGA_TX_BLOCK_COUNT_REG
;
429 group_count_reg
= FPGA_TX_GROUP_COUNT_REG
;
430 groups_per_int_reg
= FPGA_TX_GROUPS_PER_INT_REG
;
431 curr_pci_reg
= FPGA_TX_CURR_PCI_REG
;
434 printk(KERN_WARNING
"block_size, group_size, group_count\n");
435 iowrite32(channel
->block_size
, fpga
+ block_size_reg
);
436 iowrite32(channel
->group_size
/ channel
->block_size
,
437 fpga
+ block_count_reg
);
438 iowrite32(channel
->group_count
, fpga
+ group_count_reg
);
439 /* FIXME: Hardcoded groups per int. Get it from sysfs? */
440 iowrite32(1, fpga
+ groups_per_int_reg
);
442 /* Unlock PCI address? Not defined in the data sheet, but used
443 * in the reference code by Redrapids.
445 iowrite32(0x1, fpga
+ curr_pci_reg
);
447 /* The DMA address page register is shared between the RX and
448 * TX channels, so acquire lock.
450 spin_lock(channel
->iomem_lock
);
451 for (i
= 0; i
< channel
->group_count
; i
++) {
453 group_in_page
= i
% 32;
455 group_reg
= group_regs_base
+ (group_in_page
* 4);
457 iowrite32(page
, fpga
+ FPGA_DMA_ADR_PAGE_REG
);
458 iowrite32(channel
->groups
[i
].dma_addr
, fpga
+ group_reg
);
460 for (i
= 0; i
< channel
->group_count
; i
++) {
462 group_in_page
= i
% 32;
464 group_reg
= group_regs_base
+ (group_in_page
* 4);
466 iowrite32(page
, fpga
+ FPGA_DMA_ADR_PAGE_REG
);
467 printk(KERN_INFO PFX
"%ld: read dma_addr: 0x%x\n", i
,
468 ioread32(fpga
+ group_reg
));
470 spin_unlock(channel
->iomem_lock
);
474 static int poch_channel_alloc_header(struct channel_info
*channel
)
476 struct poch_cbuf_header
*header
= channel
->header
;
477 unsigned long group_offset_size
;
478 unsigned long tot_group_offsets_size
;
480 /* Allocate memory to hold header exported userspace */
481 group_offset_size
= sizeof(header
->group_offsets
[0]);
482 tot_group_offsets_size
= group_offset_size
* channel
->group_count
;
483 channel
->header_size
= sizeof(*header
) + tot_group_offsets_size
;
484 channel
->header_pg
= alloc_pages(GFP_KERNEL
| __GFP_ZERO
,
485 get_order(channel
->header_size
));
486 if (!channel
->header_pg
)
489 channel
->header
= page_address(channel
->header_pg
);
494 static void poch_channel_free_header(struct channel_info
*channel
)
498 order
= get_order(channel
->header_size
);
499 __free_pages(channel
->header_pg
, order
);
502 static void poch_channel_init_header(struct channel_info
*channel
)
505 struct poch_group_info
*groups
;
508 channel
->header
->group_size_bytes
= channel
->group_size
;
509 channel
->header
->group_count
= channel
->group_count
;
511 spin_lock_init(&channel
->group_offsets_lock
);
513 group_offsets
= channel
->header
->group_offsets
;
514 groups
= channel
->groups
;
516 for (i
= 0; i
< channel
->group_count
; i
++) {
517 if (channel
->dir
== CHANNEL_DIR_RX
)
518 group_offsets
[i
] = -1;
520 group_offsets
[i
] = groups
[i
].user_offset
;
524 static void __poch_channel_clear_counters(struct channel_info
*channel
)
526 channel
->counters
.pll_unlock
= 0;
527 channel
->counters
.fifo_empty
= 0;
528 channel
->counters
.fifo_overflow
= 0;
531 static int poch_channel_init(struct channel_info
*channel
,
532 struct poch_dev
*poch_dev
)
534 struct pci_dev
*pdev
= poch_dev
->pci_dev
;
535 struct device
*dev
= &pdev
->dev
;
536 unsigned long alloc_size
;
539 printk(KERN_WARNING
"channel_latch_attr\n");
541 channel_latch_attr(channel
);
543 channel
->transfer
= 0;
545 /* Allocate memory to hold group information. */
546 alloc_size
= channel
->group_count
* sizeof(struct poch_group_info
);
547 channel
->groups
= kzalloc(alloc_size
, GFP_KERNEL
);
548 if (!channel
->groups
) {
549 dev_err(dev
, "error allocating memory for group info\n");
554 printk(KERN_WARNING
"poch_channel_alloc_groups\n");
556 ret
= poch_channel_alloc_groups(channel
);
558 dev_err(dev
, "error allocating groups of order %d\n",
559 get_order(channel
->group_size
));
560 goto out_free_group_info
;
563 ret
= poch_channel_alloc_header(channel
);
565 dev_err(dev
, "error allocating user space header\n");
566 goto out_free_groups
;
569 channel
->fpga_iomem
= poch_dev
->fpga_iomem
;
570 channel
->bridge_iomem
= poch_dev
->bridge_iomem
;
571 channel
->iomem_lock
= &poch_dev
->iomem_lock
;
572 spin_lock_init(&channel
->counters_lock
);
574 __poch_channel_clear_counters(channel
);
576 printk(KERN_WARNING
"poch_channel_init_header\n");
578 poch_channel_init_header(channel
);
583 poch_channel_free_groups(channel
);
585 kfree(channel
->groups
);
590 static int poch_wait_fpga_prog(void __iomem
*bridge
)
592 unsigned long total_wait
;
593 const unsigned long wait_period
= 100;
594 /* FIXME: Get the actual timeout */
595 const unsigned long prog_timeo
= 10000; /* 10 Seconds */
598 printk(KERN_WARNING
"poch_wait_fpg_prog\n");
600 printk(KERN_INFO PFX
"programming fpga ...\n");
604 total_wait
+= wait_period
;
606 card_power
= ioread32(bridge
+ BRIDGE_CARD_POWER_REG
);
607 if (card_power
& BRIDGE_CARD_POWER_PROG_DONE
) {
608 printk(KERN_INFO PFX
"programming done\n");
611 if (total_wait
> prog_timeo
) {
613 "timed out while programming FPGA\n");
619 static void poch_card_power_off(struct poch_dev
*poch_dev
)
621 void __iomem
*bridge
= poch_dev
->bridge_iomem
;
624 iowrite32(0, bridge
+ BRIDGE_INT_MASK_REG
);
625 iowrite32(0, bridge
+ BRIDGE_DMA_GO_REG
);
627 card_power
= ioread32(bridge
+ BRIDGE_CARD_POWER_REG
);
628 iowrite32(card_power
& ~BRIDGE_CARD_POWER_EN
,
629 bridge
+ BRIDGE_CARD_POWER_REG
);
637 static void poch_card_clock_on(void __iomem
*fpga
)
639 /* FIXME: Get this data through sysfs? */
640 enum clk_src clk_src
= CLK_SRC_ON_BOARD
;
642 if (clk_src
== CLK_SRC_ON_BOARD
) {
643 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK
| FPGA_ADC_CLOCK_CTL_OSC_EN
,
644 fpga
+ FPGA_ADC_CLOCK_CTL_REG
);
645 } else if (clk_src
== CLK_SRC_EXTERNAL
) {
646 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK
,
647 fpga
+ FPGA_ADC_CLOCK_CTL_REG
);
651 static int poch_card_power_on(struct poch_dev
*poch_dev
)
653 void __iomem
*bridge
= poch_dev
->bridge_iomem
;
654 void __iomem
*fpga
= poch_dev
->fpga_iomem
;
656 iowrite32(BRIDGE_CARD_POWER_EN
, bridge
+ BRIDGE_CARD_POWER_REG
);
658 if (poch_wait_fpga_prog(bridge
) != 0) {
659 poch_card_power_off(poch_dev
);
663 poch_card_clock_on(fpga
);
665 /* Sync to new clock, reset state machines, set DMA mode. */
666 iowrite32(FPGA_DOM_DCM_RESET
| FPGA_DOM_SOFT_RESET
667 | FPGA_DOM_DUAL_M_SG_DMA
, fpga
+ FPGA_DOM_REG
);
669 /* FIXME: The time required for sync. needs to be tuned. */
675 static void poch_channel_analog_on(struct channel_info
*channel
)
677 void __iomem
*fpga
= channel
->fpga_iomem
;
680 spin_lock(channel
->iomem_lock
);
681 adc_dac_en
= ioread32(fpga
+ FPGA_ADC_DAC_EN_REG
);
682 switch (channel
->chno
) {
683 case CHNO_RX_CHANNEL
:
684 iowrite32(adc_dac_en
& ~FPGA_ADC_DAC_EN_ADC_OFF
,
685 fpga
+ FPGA_ADC_DAC_EN_REG
);
687 case CHNO_TX_CHANNEL
:
688 iowrite32(adc_dac_en
& ~FPGA_ADC_DAC_EN_DAC_OFF
,
689 fpga
+ FPGA_ADC_DAC_EN_REG
);
692 spin_unlock(channel
->iomem_lock
);
695 static int poch_open(struct inode
*inode
, struct file
*filp
)
697 struct poch_dev
*poch_dev
;
698 struct channel_info
*channel
;
699 void __iomem
*bridge
;
705 poch_dev
= container_of(inode
->i_cdev
, struct poch_dev
, cdev
);
706 bridge
= poch_dev
->bridge_iomem
;
707 fpga
= poch_dev
->fpga_iomem
;
709 chno
= iminor(inode
) % poch_dev
->nchannels
;
710 channel
= &poch_dev
->channels
[chno
];
712 if (!atomic_dec_and_test(&channel
->free
)) {
713 atomic_inc(&channel
->free
);
718 usage
= atomic_inc_return(&poch_dev
->usage
);
720 printk(KERN_WARNING
"poch_card_power_on\n");
723 ret
= poch_card_power_on(poch_dev
);
728 printk(KERN_INFO
"CardBus Bridge Revision: %x\n",
729 ioread32(bridge
+ BRIDGE_REV_REG
));
730 printk(KERN_INFO
"CardBus Interface Revision: %x\n",
731 ioread32(fpga
+ FPGA_IFACE_REV_REG
));
733 channel
->chno
= chno
;
734 filp
->private_data
= channel
;
736 printk(KERN_WARNING
"poch_channel_init\n");
738 ret
= poch_channel_init(channel
, poch_dev
);
742 poch_channel_analog_on(channel
);
744 printk(KERN_WARNING
"channel_dma_init\n");
746 channel_dma_init(channel
);
748 printk(KERN_WARNING
"poch_channel_analog_on\n");
751 printk(KERN_WARNING
"setting up DMA\n");
753 /* Initialize DMA Controller. */
754 iowrite32(FPGA_CAP_FIFO_REG
, bridge
+ BRIDGE_STAT_2_REG
);
755 iowrite32(FPGA_DMA_DESC_1_REG
, bridge
+ BRIDGE_STAT_3_REG
);
757 ioread32(fpga
+ FPGA_DMA_INT_STAT_REG
);
758 ioread32(fpga
+ FPGA_INT_STAT_REG
);
759 ioread32(bridge
+ BRIDGE_INT_STAT_REG
);
761 /* Initialize Interrupts. FIXME: Enable temperature
762 * handling We are enabling both Tx and Rx channel
763 * interrupts here. Do we need to enable interrupts
764 * only for the current channel? Anyways we won't get
765 * the interrupt unless the DMA is activated.
767 iowrite32(BRIDGE_INT_FPGA
, bridge
+ BRIDGE_INT_MASK_REG
);
768 iowrite32(FPGA_INT_DMA_CORE
769 | FPGA_INT_PLL_UNLOCKED
770 | FPGA_INT_TX_FF_EMPTY
771 | FPGA_INT_RX_FF_EMPTY
772 | FPGA_INT_TX_FF_OVRFLW
773 | FPGA_INT_RX_FF_OVRFLW
,
774 fpga
+ FPGA_INT_MASK_REG
);
775 iowrite32(FPGA_DMA_INT_RX
| FPGA_DMA_INT_TX
,
776 fpga
+ FPGA_DMA_INT_MASK_REG
);
779 if (channel
->dir
== CHANNEL_DIR_TX
) {
780 /* Flush TX FIFO and output data from cardbus. */
781 iowrite32(FPGA_TX_CTL_FIFO_FLUSH
782 | FPGA_TX_CTL_OUTPUT_CARDBUS
,
783 fpga
+ FPGA_TX_CTL_REG
);
786 atomic_inc(&channel
->inited
);
792 poch_card_power_off(poch_dev
);
794 atomic_dec(&poch_dev
->usage
);
795 atomic_inc(&channel
->free
);
800 static int poch_release(struct inode
*inode
, struct file
*filp
)
802 struct channel_info
*channel
= filp
->private_data
;
803 struct poch_dev
*poch_dev
;
806 poch_dev
= container_of(inode
->i_cdev
, struct poch_dev
, cdev
);
808 usage
= atomic_dec_return(&poch_dev
->usage
);
810 printk(KERN_WARNING
"poch_card_power_off\n");
811 poch_card_power_off(poch_dev
);
814 atomic_dec(&channel
->inited
);
815 poch_channel_free_header(channel
);
816 poch_channel_free_groups(channel
);
817 kfree(channel
->groups
);
818 atomic_inc(&channel
->free
);
824 * Map the header and the group buffers, to user space.
826 static int poch_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
828 struct channel_info
*channel
= filp
->private_data
;
833 unsigned long group_pages
;
834 unsigned long header_pages
;
835 unsigned long total_group_pages
;
843 printk(KERN_WARNING
"poch_mmap\n");
846 printk(KERN_WARNING PFX
"page offset: %lu\n", vma
->vm_pgoff
);
850 group_pages
= (channel
->group_size
/ PAGE_SIZE
) + 1;
851 header_pages
= (channel
->header_size
/ PAGE_SIZE
) + 1;
852 total_group_pages
= group_pages
* channel
->group_count
;
854 size
= vma
->vm_end
- vma
->vm_start
;
855 if (size
!= (header_pages
+ total_group_pages
) * PAGE_SIZE
) {
856 printk(KERN_WARNING PFX
"required %lu bytes\n", size
);
860 start
= vma
->vm_start
;
862 /* FIXME: Cleanup required on failure? */
863 pg
= channel
->header_pg
;
864 for (pg_num
= 0; pg_num
< header_pages
; pg_num
++, pg
++) {
865 printk(KERN_DEBUG PFX
"page_count: %d\n", page_count(pg
));
866 printk(KERN_DEBUG PFX
"%d: header: 0x%lx\n", pg_num
, start
);
867 ret
= vm_insert_page(vma
, start
, pg
);
869 printk(KERN_DEBUG
"vm_insert 1 failed at %lx\n", start
);
875 for (i
= 0; i
< channel
->group_count
; i
++) {
876 pg
= channel
->groups
[i
].pg
;
877 for (pg_num
= 0; pg_num
< group_pages
; pg_num
++, pg
++) {
878 printk(KERN_DEBUG PFX
"%d: group %d: 0x%lx\n",
880 ret
= vm_insert_page(vma
, start
, pg
);
882 printk(KERN_DEBUG PFX
883 "vm_insert 2 failed at %d\n", pg_num
);
894 * Check whether there is some group that the user space has not
895 * consumed yet. When the user space consumes a group, it sets it to
896 * -1. Cosuming could be reading data in case of RX and filling a
897 * buffer in case of TX.
899 static int poch_channel_available(struct channel_info
*channel
)
903 spin_lock_irq(&channel
->group_offsets_lock
);
905 for (i
= 0; i
< channel
->group_count
; i
++) {
906 if (channel
->dir
== CHANNEL_DIR_RX
907 && channel
->header
->group_offsets
[i
] == -1) {
908 spin_unlock_irq(&channel
->group_offsets_lock
);
912 if (channel
->dir
== CHANNEL_DIR_TX
913 && channel
->header
->group_offsets
[i
] != -1) {
914 spin_unlock_irq(&channel
->group_offsets_lock
);
919 spin_unlock_irq(&channel
->group_offsets_lock
);
924 static unsigned int poch_poll(struct file
*filp
, poll_table
*pt
)
926 struct channel_info
*channel
= filp
->private_data
;
927 unsigned int ret
= 0;
929 poll_wait(filp
, &channel
->wq
, pt
);
931 if (poch_channel_available(channel
)) {
932 if (channel
->dir
== CHANNEL_DIR_RX
)
933 ret
= POLLIN
| POLLRDNORM
;
935 ret
= POLLOUT
| POLLWRNORM
;
941 static int poch_ioctl(struct inode
*inode
, struct file
*filp
,
942 unsigned int cmd
, unsigned long arg
)
944 struct channel_info
*channel
= filp
->private_data
;
945 void __iomem
*fpga
= channel
->fpga_iomem
;
946 void __iomem
*bridge
= channel
->bridge_iomem
;
947 void __user
*argp
= (void __user
*)arg
;
948 struct vm_area_struct
*vms
;
949 struct poch_counters counters
;
953 case POCH_IOC_TRANSFER_START
:
954 switch (channel
->chno
) {
955 case CHNO_TX_CHANNEL
:
956 printk(KERN_INFO PFX
"ioctl: Tx start\n");
957 iowrite32(0x1, fpga
+ FPGA_TX_TRIGGER_REG
);
958 iowrite32(0x1, fpga
+ FPGA_TX_ENABLE_REG
);
960 /* FIXME: Does it make sense to do a DMA GO
961 * twice, once in Tx and once in Rx.
963 iowrite32(0x1, bridge
+ BRIDGE_DMA_GO_REG
);
965 case CHNO_RX_CHANNEL
:
966 printk(KERN_INFO PFX
"ioctl: Rx start\n");
967 iowrite32(0x1, fpga
+ FPGA_RX_ARM_REG
);
968 iowrite32(0x1, bridge
+ BRIDGE_DMA_GO_REG
);
972 case POCH_IOC_TRANSFER_STOP
:
973 switch (channel
->chno
) {
974 case CHNO_TX_CHANNEL
:
975 printk(KERN_INFO PFX
"ioctl: Tx stop\n");
976 iowrite32(0x0, fpga
+ FPGA_TX_ENABLE_REG
);
977 iowrite32(0x0, fpga
+ FPGA_TX_TRIGGER_REG
);
978 iowrite32(0x0, bridge
+ BRIDGE_DMA_GO_REG
);
980 case CHNO_RX_CHANNEL
:
981 printk(KERN_INFO PFX
"ioctl: Rx stop\n");
982 iowrite32(0x0, fpga
+ FPGA_RX_ARM_REG
);
983 iowrite32(0x0, bridge
+ BRIDGE_DMA_GO_REG
);
987 case POCH_IOC_GET_COUNTERS
:
988 if (access_ok(VERIFY_WRITE
, argp
, sizeof(struct poch_counters
)))
991 spin_lock_irq(&channel
->counters_lock
);
992 counters
= channel
->counters
;
993 __poch_channel_clear_counters(channel
);
994 spin_unlock_irq(&channel
->counters_lock
);
996 ret
= copy_to_user(argp
, &counters
,
997 sizeof(struct poch_counters
));
1002 case POCH_IOC_SYNC_GROUP_FOR_USER
:
1003 case POCH_IOC_SYNC_GROUP_FOR_DEVICE
:
1004 vms
= find_vma(current
->mm
, arg
);
1006 /* Address not mapped. */
1008 if (vms
->vm_file
!= filp
)
1009 /* Address mapped from different device/file. */
1012 flush_cache_range(vms
, arg
, arg
+ channel
->group_size
);
1018 static struct file_operations poch_fops
= {
1019 .owner
= THIS_MODULE
,
1021 .release
= poch_release
,
1022 .ioctl
= poch_ioctl
,
1027 static void poch_irq_dma(struct channel_info
*channel
)
1033 struct poch_group_info
*groups
;
1037 if (!atomic_read(&channel
->inited
))
1040 prev_transfer
= channel
->transfer
;
1042 if (channel
->chno
== CHNO_RX_CHANNEL
)
1043 curr_group_reg
= FPGA_RX_CURR_GROUP_REG
;
1045 curr_group_reg
= FPGA_TX_CURR_GROUP_REG
;
1047 curr_transfer
= ioread32(channel
->fpga_iomem
+ curr_group_reg
);
1049 groups_done
= curr_transfer
- prev_transfer
;
1050 /* Check wrap over, and handle it. */
1051 if (groups_done
<= 0)
1052 groups_done
+= channel
->group_count
;
1054 group_offsets
= channel
->header
->group_offsets
;
1055 groups
= channel
->groups
;
1057 spin_lock(&channel
->group_offsets_lock
);
1059 for (i
= 0; i
< groups_done
; i
++) {
1060 j
= (prev_transfer
+ i
) % channel
->group_count
;
1061 if (channel
->dir
== CHANNEL_DIR_RX
)
1062 group_offsets
[j
] = -1;
1064 group_offsets
[j
] = groups
[j
].user_offset
;
1067 spin_unlock(&channel
->group_offsets_lock
);
1069 channel
->transfer
= curr_transfer
;
1071 wake_up_interruptible(&channel
->wq
);
1074 static irqreturn_t
poch_irq_handler(int irq
, void *p
)
1076 struct poch_dev
*poch_dev
= p
;
1077 void __iomem
*bridge
= poch_dev
->bridge_iomem
;
1078 void __iomem
*fpga
= poch_dev
->fpga_iomem
;
1079 struct channel_info
*channel_rx
= &poch_dev
->channels
[CHNO_RX_CHANNEL
];
1080 struct channel_info
*channel_tx
= &poch_dev
->channels
[CHNO_TX_CHANNEL
];
1085 bridge_stat
= ioread32(bridge
+ BRIDGE_INT_STAT_REG
);
1086 fpga_stat
= ioread32(fpga
+ FPGA_INT_STAT_REG
);
1087 dma_stat
= ioread32(fpga
+ FPGA_DMA_INT_STAT_REG
);
1089 ioread32(fpga
+ FPGA_DMA_INT_STAT_REG
);
1090 ioread32(fpga
+ FPGA_INT_STAT_REG
);
1091 ioread32(bridge
+ BRIDGE_INT_STAT_REG
);
1093 if (bridge_stat
& BRIDGE_INT_FPGA
) {
1094 if (fpga_stat
& FPGA_INT_DMA_CORE
) {
1095 if (dma_stat
& FPGA_DMA_INT_RX
)
1096 poch_irq_dma(channel_rx
);
1097 if (dma_stat
& FPGA_DMA_INT_TX
)
1098 poch_irq_dma(channel_tx
);
1100 if (fpga_stat
& FPGA_INT_PLL_UNLOCKED
) {
1101 channel_tx
->counters
.pll_unlock
++;
1102 channel_rx
->counters
.pll_unlock
++;
1103 if (printk_ratelimit())
1104 printk(KERN_WARNING PFX
"PLL unlocked\n");
1106 if (fpga_stat
& FPGA_INT_TX_FF_EMPTY
)
1107 channel_tx
->counters
.fifo_empty
++;
1108 if (fpga_stat
& FPGA_INT_TX_FF_OVRFLW
)
1109 channel_tx
->counters
.fifo_overflow
++;
1110 if (fpga_stat
& FPGA_INT_RX_FF_EMPTY
)
1111 channel_rx
->counters
.fifo_empty
++;
1112 if (fpga_stat
& FPGA_INT_RX_FF_OVRFLW
)
1113 channel_rx
->counters
.fifo_overflow
++;
1116 * FIXME: These errors should be notified through the
1117 * poll interface as POLLERR.
1120 /* Re-enable interrupts. */
1121 iowrite32(BRIDGE_INT_FPGA
, bridge
+ BRIDGE_INT_MASK_REG
);
1129 static void poch_class_dev_unregister(struct poch_dev
*poch_dev
, int id
)
1133 struct channel_info
*channel
;
1136 if (poch_dev
->dev
== NULL
)
1139 for (i
= 0; i
< poch_dev
->nchannels
; i
++) {
1140 channel
= &poch_dev
->channels
[i
];
1141 devno
= poch_first_dev
+ (id
* poch_dev
->nchannels
) + i
;
1146 nattrs
= sizeof(poch_class_attrs
)/sizeof(poch_class_attrs
[0]);
1147 for (j
= 0; j
< nattrs
; j
++)
1148 device_remove_file(channel
->dev
, poch_class_attrs
[j
]);
1150 device_unregister(channel
->dev
);
1153 device_unregister(poch_dev
->dev
);
1156 static int __devinit
poch_class_dev_register(struct poch_dev
*poch_dev
,
1159 struct device
*dev
= &poch_dev
->pci_dev
->dev
;
1163 struct channel_info
*channel
;
1166 poch_dev
->dev
= device_create(poch_cls
, &poch_dev
->pci_dev
->dev
,
1167 MKDEV(0, 0), NULL
, "poch%d", id
);
1168 if (IS_ERR(poch_dev
->dev
)) {
1169 dev_err(dev
, "error creating parent class device");
1170 ret
= PTR_ERR(poch_dev
->dev
);
1171 poch_dev
->dev
= NULL
;
1175 for (i
= 0; i
< poch_dev
->nchannels
; i
++) {
1176 channel
= &poch_dev
->channels
[i
];
1178 devno
= poch_first_dev
+ (id
* poch_dev
->nchannels
) + i
;
1179 channel
->dev
= device_create(poch_cls
, poch_dev
->dev
, devno
,
1181 if (IS_ERR(channel
->dev
)) {
1182 dev_err(dev
, "error creating channel class device");
1183 ret
= PTR_ERR(channel
->dev
);
1184 channel
->dev
= NULL
;
1185 poch_class_dev_unregister(poch_dev
, id
);
1189 dev_set_drvdata(channel
->dev
, channel
);
1190 nattrs
= sizeof(poch_class_attrs
)/sizeof(poch_class_attrs
[0]);
1191 for (j
= 0; j
< nattrs
; j
++) {
1192 ret
= device_create_file(channel
->dev
,
1193 poch_class_attrs
[j
]);
1195 dev_err(dev
, "error creating attribute file");
1196 poch_class_dev_unregister(poch_dev
, id
);
1205 static int __devinit
poch_pci_probe(struct pci_dev
*pdev
,
1206 const struct pci_device_id
*pci_id
)
1208 struct device
*dev
= &pdev
->dev
;
1209 struct poch_dev
*poch_dev
;
1210 struct uio_info
*uio
;
1215 poch_dev
= kzalloc(sizeof(struct poch_dev
), GFP_KERNEL
);
1217 dev_err(dev
, "error allocating priv. data memory\n");
1221 poch_dev
->pci_dev
= pdev
;
1222 uio
= &poch_dev
->uio
;
1224 pci_set_drvdata(pdev
, poch_dev
);
1226 spin_lock_init(&poch_dev
->iomem_lock
);
1228 poch_dev
->nchannels
= POCH_NCHANNELS
;
1229 poch_dev
->channels
[CHNO_RX_CHANNEL
].dir
= CHANNEL_DIR_RX
;
1230 poch_dev
->channels
[CHNO_TX_CHANNEL
].dir
= CHANNEL_DIR_TX
;
1232 for (i
= 0; i
< poch_dev
->nchannels
; i
++) {
1233 init_waitqueue_head(&poch_dev
->channels
[i
].wq
);
1234 atomic_set(&poch_dev
->channels
[i
].free
, 1);
1235 atomic_set(&poch_dev
->channels
[i
].inited
, 0);
1238 ret
= pci_enable_device(pdev
);
1240 dev_err(dev
, "error enabling device\n");
1244 ret
= pci_request_regions(pdev
, "poch");
1246 dev_err(dev
, "error requesting resources\n");
1250 uio
->mem
[0].addr
= pci_resource_start(pdev
, 1);
1251 if (!uio
->mem
[0].addr
) {
1252 dev_err(dev
, "invalid BAR1\n");
1257 uio
->mem
[0].size
= pci_resource_len(pdev
, 1);
1258 uio
->mem
[0].memtype
= UIO_MEM_PHYS
;
1261 uio
->version
= "0.0.1";
1263 ret
= uio_register_device(dev
, uio
);
1265 dev_err(dev
, "error register UIO device: %d\n", ret
);
1269 poch_dev
->bridge_iomem
= ioremap(pci_resource_start(pdev
, 0),
1270 pci_resource_len(pdev
, 0));
1271 if (poch_dev
->bridge_iomem
== NULL
) {
1272 dev_err(dev
, "error mapping bridge (bar0) registers\n");
1277 poch_dev
->fpga_iomem
= ioremap(pci_resource_start(pdev
, 1),
1278 pci_resource_len(pdev
, 1));
1279 if (poch_dev
->fpga_iomem
== NULL
) {
1280 dev_err(dev
, "error mapping fpga (bar1) registers\n");
1282 goto out_bar0_unmap
;
1285 ret
= request_irq(pdev
->irq
, poch_irq_handler
, IRQF_SHARED
,
1286 dev
->bus_id
, poch_dev
);
1288 dev_err(dev
, "error requesting IRQ %u\n", pdev
->irq
);
1290 goto out_bar1_unmap
;
1293 if (!idr_pre_get(&poch_ids
, GFP_KERNEL
)) {
1294 dev_err(dev
, "error allocating memory ids\n");
1299 idr_get_new(&poch_ids
, poch_dev
, &id
);
1300 if (id
>= MAX_POCH_CARDS
) {
1301 dev_err(dev
, "minors exhausted\n");
1306 cdev_init(&poch_dev
->cdev
, &poch_fops
);
1307 poch_dev
->cdev
.owner
= THIS_MODULE
;
1308 ret
= cdev_add(&poch_dev
->cdev
,
1309 poch_first_dev
+ (id
* poch_dev
->nchannels
),
1310 poch_dev
->nchannels
);
1312 dev_err(dev
, "error register character device\n");
1313 goto out_idr_remove
;
1316 ret
= poch_class_dev_register(poch_dev
, id
);
1323 cdev_del(&poch_dev
->cdev
);
1325 idr_remove(&poch_ids
, id
);
1327 free_irq(pdev
->irq
, poch_dev
);
1329 iounmap(poch_dev
->fpga_iomem
);
1331 iounmap(poch_dev
->bridge_iomem
);
1333 uio_unregister_device(uio
);
1335 pci_release_regions(pdev
);
1337 pci_disable_device(pdev
);
1344 * FIXME: We are yet to handle the hot unplug case.
1346 static void poch_pci_remove(struct pci_dev
*pdev
)
1348 struct poch_dev
*poch_dev
= pci_get_drvdata(pdev
);
1349 struct uio_info
*uio
= &poch_dev
->uio
;
1350 unsigned int minor
= MINOR(poch_dev
->cdev
.dev
);
1351 unsigned int id
= minor
/ poch_dev
->nchannels
;
1353 /* FIXME: unmap fpga_iomem and bridge_iomem */
1355 poch_class_dev_unregister(poch_dev
, id
);
1356 cdev_del(&poch_dev
->cdev
);
1357 idr_remove(&poch_ids
, id
);
1358 free_irq(pdev
->irq
, poch_dev
);
1359 uio_unregister_device(uio
);
1360 pci_release_regions(pdev
);
1361 pci_disable_device(pdev
);
1362 pci_set_drvdata(pdev
, NULL
);
1363 iounmap(uio
->mem
[0].internal_addr
);
1368 static const struct pci_device_id poch_pci_ids
[] /* __devinitconst */ = {
1369 { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS
,
1370 PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
) },
1374 static struct pci_driver poch_pci_driver
= {
1376 .id_table
= poch_pci_ids
,
1377 .probe
= poch_pci_probe
,
1378 .remove
= poch_pci_remove
,
1381 static int __init
poch_init_module(void)
1385 ret
= alloc_chrdev_region(&poch_first_dev
, 0,
1386 MAX_POCH_DEVICES
, DRV_NAME
);
1388 printk(KERN_ERR PFX
"error allocating device no.");
1392 poch_cls
= class_create(THIS_MODULE
, "pocketchange");
1393 if (IS_ERR(poch_cls
)) {
1394 ret
= PTR_ERR(poch_cls
);
1395 goto out_unreg_chrdev
;
1398 ret
= pci_register_driver(&poch_pci_driver
);
1400 printk(KERN_ERR PFX
"error register PCI device");
1401 goto out_class_destroy
;
1407 class_destroy(poch_cls
);
1410 unregister_chrdev_region(poch_first_dev
, MAX_POCH_DEVICES
);
1415 static void __exit
poch_exit_module(void)
1417 pci_unregister_driver(&poch_pci_driver
);
1418 class_destroy(poch_cls
);
1419 unregister_chrdev_region(poch_first_dev
, MAX_POCH_DEVICES
);
1422 module_init(poch_init_module
);
1423 module_exit(poch_exit_module
);
1425 MODULE_LICENSE("GPL v2");