/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Abstract: rt2x00 generic pci device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rt2x00.h"
#include "rt2x00pci.h"

/*
 * TX data handlers.
 */
int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
			    struct data_queue *queue, struct sk_buff *skb,
			    struct ieee80211_tx_control *control)
{
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;
	u32 word;

	if (rt2x00queue_full(queue))
		return -EINVAL;

	rt2x00_desc_read(entry_priv->desc, 0, &word);

	if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
	    rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
		ERROR(rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(entry, &txdesc, control);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(skb);
	skbdesc->data = skb->data;
	skbdesc->data_len = skb->len;
	skbdesc->desc = entry_priv->desc;
	skbdesc->desc_len = queue->desc_size;
	skbdesc->entry = entry;

	memcpy(&entry_priv->control, control, sizeof(entry_priv->control));
	memcpy(entry_priv->data, skb->data, skb->len);

	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_index_inc(queue, Q_INDEX);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);

/*
 * TX/RX data handlers.
 */
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
	struct queue_entry_priv_pci *entry_priv;
	struct ieee80211_hdr *hdr;
	struct skb_frame_desc *skbdesc;
	struct rxdone_entry_desc rxdesc;
	int header_size;
	int align;
	u32 word;

	while (1) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		entry_priv = entry->priv_data;
		rt2x00_desc_read(entry_priv->desc, 0, &word);

		if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
			break;

		memset(&rxdesc, 0, sizeof(rxdesc));
		rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);

		hdr = (struct ieee80211_hdr *)entry_priv->data;
		header_size =
		    ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));

		/*
		 * The data behind the ieee80211 header must be
		 * aligned on a 4 byte boundary.
		 */
		align = header_size % 4;
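
		/*
		 * Illustrative note (not in the original comment): a 24 byte
		 * data header already satisfies this (24 % 4 == 0), while a
		 * 26 byte QoS data header gives align = 2, so reserving those
		 * 2 bytes below shifts the payload back onto a 4 byte
		 * boundary.
		 */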

		/*
		 * Allocate the sk_buffer, initialize it and copy
		 * all data into it.
		 */
		entry->skb = dev_alloc_skb(rxdesc.size + align);
		if (!entry->skb)
			return;

		skb_reserve(entry->skb, align);
		memcpy(skb_put(entry->skb, rxdesc.size),
		       entry_priv->data, rxdesc.size);

		/*
		 * Fill in skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		memset(skbdesc, 0, sizeof(*skbdesc));
		skbdesc->data = entry->skb->data;
		skbdesc->data_len = entry->skb->len;
		skbdesc->desc = entry_priv->desc;
		skbdesc->desc_len = queue->desc_size;
		skbdesc->entry = entry;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, &rxdesc);

		if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
			rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
			rt2x00_desc_write(entry_priv->desc, 0, word);
		}

		rt2x00queue_index_inc(queue, Q_INDEX);
	}
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
		      struct txdone_entry_desc *txdesc)
{
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
	u32 word;

	txdesc->control = &entry_priv->control;
	rt2x00lib_txdone(entry, txdesc);

	/*
	 * Make this entry available for reuse.
	 */
	rt2x00_desc_read(entry_priv->desc, 0, &word);
	rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
	rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
	rt2x00_desc_write(entry_priv->desc, 0, word);

	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);

	/*
	 * If the data queue was full before the txdone handler
	 * we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished.
	 */
	if (!rt2x00queue_full(entry->queue))
		ieee80211_wake_queue(rt2x00dev->hw, entry_priv->control.queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_txdone);

/*
 * Device initialization handlers.
 */
#define desc_size(__queue) \
({ \
	((__queue)->limit * (__queue)->desc_size); \
})

#define data_size(__queue) \
({ \
	((__queue)->limit * (__queue)->data_size); \
})

#define dma_size(__queue) \
({ \
	data_size(__queue) + desc_size(__queue); \
})

#define desc_offset(__queue, __base, __i) \
({ \
	(__base) + data_size(__queue) + \
	    ((__i) * (__queue)->desc_size); \
})

#define data_offset(__queue, __base, __i) \
({ \
	(__base) + \
	    ((__i) * (__queue)->data_size); \
})
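
/*
 * Layout sketch, derived from the macros above: every queue owns a single
 * contiguous coherent DMA region, with the per-entry data buffers packed at
 * the start and the per-entry descriptors packed after them:
 *
 *   base                                        -> data buffer of entry 0
 *   base + __i * data_size                      -> data buffer of entry __i
 *   base + limit * data_size                    -> descriptor of entry 0
 *   base + limit * data_size + __i * desc_size  -> descriptor of entry __i
 *
 * data_offset()/desc_offset() map an entry index to the matching virtual or
 * DMA address inside that region.
 */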

static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
	struct queue_entry_priv_pci *entry_priv;
	void *addr;
	dma_addr_t dma;
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	addr = pci_alloc_consistent(pci_dev, dma_size(queue), &dma);
	if (!addr)
		return -ENOMEM;

	memset(addr, 0, dma_size(queue));

	/*
	 * Initialize all queue entries to contain valid addresses.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->desc = desc_offset(queue, addr, i);
		entry_priv->desc_dma = desc_offset(queue, dma, i);
		entry_priv->data = data_offset(queue, addr, i);
		entry_priv->data_dma = data_offset(queue, dma, i);
	}

	return 0;
}

static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
	struct queue_entry_priv_pci *entry_priv =
	    queue->entries[0].priv_data;

	if (entry_priv->data)
		pci_free_consistent(pci_dev, dma_size(queue),
				    entry_priv->data, entry_priv->data_dma);
	entry_priv->data = NULL;
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
	struct data_queue *queue;
	int status;

	/*
	 * Allocate DMA memory for all queues.
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, pci_name(pci_dev), rt2x00dev);
	if (status) {
		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
		      pci_dev->irq, status);
		goto exit;
	}

	return 0;

exit:
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * Free irq line.
	 */
	free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev);

	/*
	 * Free DMA memory.
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
 * PCI driver handlers.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	if (rt2x00dev->csr.base) {
		iounmap(rt2x00dev->csr.base);
		rt2x00dev->csr.base = NULL;
	}
}

static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);

	rt2x00dev->csr.base = ioremap(pci_resource_start(pci_dev, 0),
				      pci_resource_len(pci_dev, 0));
	if (!rt2x00dev->csr.base)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	rt2x00pci_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		ERROR_PROBE("PCI request regions failed.\n");
		return retval;
	}

	retval = pci_enable_device(pci_dev);
	if (retval) {
		ERROR_PROBE("Enable device failed.\n");
		goto exit_release_regions;
	}

	pci_set_master(pci_dev);

	if (pci_set_mwi(pci_dev))
		ERROR_PROBE("MWI not available.\n");

	if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) &&
	    pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
		ERROR_PROBE("PCI DMA not supported.\n");
		retval = -EIO;
		goto exit_disable_device;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_disable_device;
	}

	pci_set_drvdata(pci_dev, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = pci_dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_disable_device:
	if (retval != -EBUSY)
		pci_disable_device(pci_dev);

exit_release_regions:
	pci_release_regions(pci_dev);

	pci_set_drvdata(pci_dev, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

#ifdef CONFIG_PM
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	rt2x00pci_free_reg(rt2x00dev);

	pci_save_state(pci_dev);
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

int rt2x00pci_resume(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	if (pci_set_power_state(pci_dev, PCI_D0) ||
	    pci_enable_device(pci_dev) ||
	    pci_restore_state(pci_dev)) {
		ERROR(rt2x00dev, "Failed to resume device.\n");
		return -EIO;
	}

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2x00lib_resume(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */
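
/*
 * Usage sketch (illustrative only, not part of the original file): a chipset
 * driver built on top of these helpers points its pci_driver at the handlers
 * exported above and stores its rt2x00_ops pointer in the PCI id table's
 * driver_data field, which rt2x00pci_probe() casts back. The rt2xxxpci names
 * below are placeholders, not real symbols.
 *
 *	static struct pci_driver rt2xxxpci_driver = {
 *		.name		= "rt2xxxpci",
 *		.id_table	= rt2xxxpci_device_table,
 *		.probe		= rt2x00pci_probe,
 *		.remove		= __devexit_p(rt2x00pci_remove),
 *	#ifdef CONFIG_PM
 *		.suspend	= rt2x00pci_suspend,
 *		.resume		= rt2x00pci_resume,
 *	#endif
 *	};
 *
 * The chipset interrupt handler (registered via rt2x00dev->ops->lib->irq_handler
 * in rt2x00pci_initialize()) then calls rt2x00pci_txdone() and
 * rt2x00pci_rxdone() for completed descriptors, while rt2x00pci_write_tx_data()
 * is typically wired up as the driver's TX queue write handler.
 */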

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");