rtlwifi: Fix kernel oops on ARM SOC
drivers/net/wireless/rtlwifi/pci.c
1 /******************************************************************************
3 * Copyright(c) 2009-2010 Realtek Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
26 * Larry Finger <Larry.Finger@lwfinger.net>
28 *****************************************************************************/
30 #include "core.h"
31 #include "wifi.h"
32 #include "pci.h"
33 #include "base.h"
34 #include "ps.h"
35 #include "efuse.h"
37 static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
38 PCI_VENDOR_ID_INTEL,
39 PCI_VENDOR_ID_ATI,
40 PCI_VENDOR_ID_AMD,
41 PCI_VENDOR_ID_SI
44 static const u8 ac_to_hwq[] = {
45 VO_QUEUE,
46 VI_QUEUE,
47 BE_QUEUE,
48 BK_QUEUE
51 static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
52 struct sk_buff *skb)
54 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
55 __le16 fc = rtl_get_fc(skb);
56 u8 queue_index = skb_get_queue_mapping(skb);
58 if (unlikely(ieee80211_is_beacon(fc)))
59 return BEACON_QUEUE;
60 if (ieee80211_is_mgmt(fc))
61 return MGNT_QUEUE;
62 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
63 if (ieee80211_is_nullfunc(fc))
64 return HIGH_QUEUE;
66 return ac_to_hwq[queue_index];
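A usage sketch (not part of the file): mac80211 numbers its access categories 0 = VO, 1 = VI, 2 = BE, 3 = BK, which is exactly the order of ac_to_hwq above, so the TX path can index the table directly with skb_get_queue_mapping(). The variables below are illustrative; rtl_pci_tx() further down does the same thing.

	/* sketch: pick the hardware TX ring for an outgoing frame */
	u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);	/* e.g. a BE data frame -> BE_QUEUE */
	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];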
69 /* Update PCI dependent default settings*/
70 static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
72 struct rtl_priv *rtlpriv = rtl_priv(hw);
73 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
74 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
75 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
76 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
77 u8 init_aspm;
79 ppsc->reg_rfps_level = 0;
80 ppsc->support_aspm = 0;
82 /*Update PCI ASPM setting */
83 ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
84 switch (rtlpci->const_pci_aspm) {
85 case 0:
86 /*No ASPM */
87 break;
89 case 1:
90 /*ASPM dynamically enabled/disable. */
91 ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
92 break;
94 case 2:
95 /*ASPM with Clock Req dynamically enabled/disable. */
96 ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
97 RT_RF_OFF_LEVL_CLK_REQ);
98 break;
100 case 3:
102 * Always enable ASPM and Clock Req
103 * from initialization to halt.
104 * */
105 ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
106 ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
107 RT_RF_OFF_LEVL_CLK_REQ);
108 break;
110 case 4:
112 * Always enable ASPM without Clock Req
113 * from initialization to halt.
114 * */
115 ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
116 RT_RF_OFF_LEVL_CLK_REQ);
117 ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
118 break;
121 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
123 /*Update Radio OFF setting */
124 switch (rtlpci->const_hwsw_rfoff_d3) {
125 case 1:
126 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
127 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
128 break;
130 case 2:
131 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
132 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
133 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
134 break;
136 case 3:
137 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
138 break;
141 /*Set HW definition to determine if it supports ASPM. */
142 switch (rtlpci->const_support_pciaspm) {
143 case 0:{
144 /*Not support ASPM. */
145 bool support_aspm = false;
146 ppsc->support_aspm = support_aspm;
147 break;
149 case 1:{
150 /*Support ASPM. */
151 bool support_aspm = true;
152 bool support_backdoor = true;
153 ppsc->support_aspm = support_aspm;
155 /*if (priv->oem_id == RT_CID_TOSHIBA &&
156 !priv->ndis_adapter.amd_l1_patch)
157 support_backdoor = false; */
159 ppsc->support_backdoor = support_backdoor;
161 break;
163 case 2:
164 /*ASPM value set by chipset. */
165 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
166 bool support_aspm = true;
167 ppsc->support_aspm = support_aspm;
169 break;
170 default:
171 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
172 ("switch case not process\n"));
173 break;
176 /* Toshiba ASPM issue: Toshiba platforms set ASPM themselves,
177 * so we should not set ASPM in the driver */
178 pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
179 if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
180 init_aspm == 0x43)
181 ppsc->support_aspm = false;
184 static bool _rtl_pci_platform_switch_device_pci_aspm(
185 struct ieee80211_hw *hw,
186 u8 value)
188 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
189 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
191 if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
192 value |= 0x40;
194 pci_write_config_byte(rtlpci->pdev, 0x80, value);
196 return false;
199 /* Write 0x01 to enable the clock request, 0x0 to disable it. */
200 static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
202 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
203 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
205 pci_write_config_byte(rtlpci->pdev, 0x81, value);
207 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
208 udelay(100);
210 return true;
213 /*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
214 static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
216 struct rtl_priv *rtlpriv = rtl_priv(hw);
217 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
218 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
219 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
220 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
221 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
222 u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
223 /*Retrieve original configuration settings. */
224 u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
225 u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
226 pcibridge_linkctrlreg;
227 u16 aspmlevel = 0;
228 u8 tmp_u1b = 0;
230 if (!ppsc->support_aspm)
231 return;
233 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
234 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
235 ("PCI(Bridge) UNKNOWN.\n"));
237 return;
240 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
241 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
242 _rtl_pci_switch_clk_req(hw, 0x0);
245 /* read back to ensure the device will be in the L0 state after an I/O */
246 pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
248 /*Set corresponding value. */
249 aspmlevel |= BIT(0) | BIT(1);
250 linkctrl_reg &= ~aspmlevel;
251 pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
253 _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
254 udelay(50);
256 /*4 Disable Pci Bridge ASPM */
257 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
258 pcicfg_addrport + (num4bytes << 2));
259 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
261 udelay(50);
265 *Enable RTL8192SE ASPM & enable PCI bridge ASPM for
266 *power saving. We should follow the sequence: enable
267 *RTL8192SE ASPM first, then enable PCI bridge ASPM,
268 *or the system will show a bluescreen.
270 static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
272 struct rtl_priv *rtlpriv = rtl_priv(hw);
273 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
274 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
275 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
276 u8 pcibridge_busnum = pcipriv->ndis_adapter.pcibridge_busnum;
277 u8 pcibridge_devnum = pcipriv->ndis_adapter.pcibridge_devnum;
278 u8 pcibridge_funcnum = pcipriv->ndis_adapter.pcibridge_funcnum;
279 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
280 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
281 u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
282 u16 aspmlevel;
283 u8 u_pcibridge_aspmsetting;
284 u8 u_device_aspmsetting;
286 if (!ppsc->support_aspm)
287 return;
289 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
290 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
291 ("PCI(Bridge) UNKNOWN.\n"));
292 return;
295 /*4 Enable Pci Bridge ASPM */
296 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
297 pcicfg_addrport + (num4bytes << 2));
299 u_pcibridge_aspmsetting =
300 pcipriv->ndis_adapter.pcibridge_linkctrlreg |
301 rtlpci->const_hostpci_aspm_setting;
303 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
304 u_pcibridge_aspmsetting &= ~BIT(0);
306 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);
308 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
309 ("PlatformEnableASPM():PciBridge busnumber[%x], "
310 "DevNumbe[%x], funcnumber[%x], Write reg[%x] = %x\n",
311 pcibridge_busnum, pcibridge_devnum, pcibridge_funcnum,
312 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
313 u_pcibridge_aspmsetting));
315 udelay(50);
317 /*Get ASPM level (with/without Clock Req) */
318 aspmlevel = rtlpci->const_devicepci_aspm_setting;
319 u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
321 /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
322 /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
324 u_device_aspmsetting |= aspmlevel;
326 _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
328 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
329 _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
330 RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
331 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
333 udelay(100);
336 static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
338 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
339 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
341 bool status = false;
342 u8 offset_e0;
343 unsigned offset_e4;
345 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
346 pcicfg_addrport + 0xE0);
347 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);
349 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
350 pcicfg_addrport + 0xE0);
351 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);
353 if (offset_e0 == 0xA0) {
354 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
355 pcicfg_addrport + 0xE4);
356 rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
357 if (offset_e4 & BIT(23))
358 status = true;
361 return status;
364 static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
366 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
367 u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
368 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
369 u8 linkctrl_reg;
370 u8 num4bbytes;
372 num4bbytes = (capabilityoffset + 0x10) / 4;
374 /*Read Link Control Register */
375 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
376 pcicfg_addrport + (num4bbytes << 2));
377 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
379 pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
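The rtl_pci_raw_write_port_ulong()/rtl_pci_raw_read_port_uchar() helpers used above perform legacy PCI type-1 configuration accesses (PCI_CONF_ADDRESS/PCI_CONF_DATA are the usual 0xCF8/0xCFC I/O ports). A sketch of the address encoding, with illustrative bus/device/function values; pcicfg_addrport itself is built this way in _rtl_pci_find_adapter() further down:

	/* sketch: type-1 config-space address for the PCI bridge */
	u32 addrport = (1u << 31) |		/* enable bit            */
		       (busnum << 16) |		/* bridge bus number     */
		       (devnum << 11) |		/* bridge device number  */
		       (funcnum << 8);		/* bridge function number*/
	/* the Link Control register sits at capabilityoffset + 0x10, so
	 * num4bbytes selects that dword and (num4bbytes << 2) re-creates
	 * the byte offset written to PCI_CONF_ADDRESS above */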
382 static void rtl_pci_parse_configuration(struct pci_dev *pdev,
383 struct ieee80211_hw *hw)
385 struct rtl_priv *rtlpriv = rtl_priv(hw);
386 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
388 u8 tmp;
389 int pos;
390 u8 linkctrl_reg;
392 /*Link Control Register */
393 pos = pci_pcie_cap(pdev);
394 pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
395 pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
397 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
398 ("Link Control Register =%x\n",
399 pcipriv->ndis_adapter.linkctrl_reg));
401 pci_read_config_byte(pdev, 0x98, &tmp);
402 tmp |= BIT(4);
403 pci_write_config_byte(pdev, 0x98, tmp);
405 tmp = 0x17;
406 pci_write_config_byte(pdev, 0x70f, tmp);
409 static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
411 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
413 _rtl_pci_update_default_setting(hw);
415 if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
416 /*Always enable ASPM & Clock Req. */
417 rtl_pci_enable_aspm(hw);
418 RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
423 static void _rtl_pci_io_handler_init(struct device *dev,
424 struct ieee80211_hw *hw)
426 struct rtl_priv *rtlpriv = rtl_priv(hw);
428 rtlpriv->io.dev = dev;
430 rtlpriv->io.write8_async = pci_write8_async;
431 rtlpriv->io.write16_async = pci_write16_async;
432 rtlpriv->io.write32_async = pci_write32_async;
434 rtlpriv->io.read8_sync = pci_read8_sync;
435 rtlpriv->io.read16_sync = pci_read16_sync;
436 rtlpriv->io.read32_sync = pci_read32_sync;
440 static void _rtl_pci_io_handler_release(struct ieee80211_hw *hw)
444 static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
445 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid)
447 struct rtl_priv *rtlpriv = rtl_priv(hw);
448 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
449 u8 additionlen = FCS_LEN;
450 struct sk_buff *next_skb;
452 /* here: open is 4, WEP/TKIP is 8, AES is 12 (FCS_LEN plus hw_key->icv_len) */
453 if (info->control.hw_key)
454 additionlen += info->control.hw_key->icv_len;
456 /* The maximum number of early-mode skbs is 6 */
457 tcb_desc->empkt_num = 0;
458 spin_lock_bh(&rtlpriv->locks.waitq_lock);
459 skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
460 struct ieee80211_tx_info *next_info;
462 next_info = IEEE80211_SKB_CB(next_skb);
463 if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
464 tcb_desc->empkt_len[tcb_desc->empkt_num] =
465 next_skb->len + additionlen;
466 tcb_desc->empkt_num++;
467 } else {
468 break;
471 if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
472 next_skb))
473 break;
475 if (tcb_desc->empkt_num >= 5)
476 break;
478 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
480 return true;
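A worked sketch of the per-packet overhead computed above, assuming a CCMP (AES) key whose hw_key->icv_len is 8: FCS_LEN (4) plus 8 gives the 12 bytes the comment mentions, and each waiting A-MPDU subframe is accounted as its skb length plus that overhead.

	/* sketch: tail overhead for one CCMP-protected early-mode packet */
	u8 additionlen = FCS_LEN;			/* 4-byte FCS        */
	additionlen += info->control.hw_key->icv_len;	/* + 8-byte MIC = 12 */
	tcb_desc->empkt_len[0] = next_skb->len + additionlen;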
483 /* just for early mode now */
484 static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
486 struct rtl_priv *rtlpriv = rtl_priv(hw);
487 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
488 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
489 struct sk_buff *skb = NULL;
490 struct ieee80211_tx_info *info = NULL;
491 int tid; /* should be int */
493 if (!rtlpriv->rtlhal.earlymode_enable)
494 return;
496 /* we just use early mode for BE/BK/VI/VO */
497 for (tid = 7; tid >= 0; tid--) {
498 u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
499 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
500 while (!mac->act_scanning &&
501 rtlpriv->psc.rfpwr_state == ERFON) {
502 struct rtl_tcb_desc tcb_desc;
503 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
505 spin_lock_bh(&rtlpriv->locks.waitq_lock);
506 if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
507 (ring->entries - skb_queue_len(&ring->queue) > 5)) {
508 skb = skb_dequeue(&mac->skb_waitq[tid]);
509 } else {
510 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
511 break;
513 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
515 /* Some destinations can't do early mode, e.g.
516 * multicast/broadcast/non-QoS data */
517 info = IEEE80211_SKB_CB(skb);
518 if (info->flags & IEEE80211_TX_CTL_AMPDU)
519 _rtl_update_earlymode_info(hw, skb,
520 &tcb_desc, tid);
522 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
528 static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
530 struct rtl_priv *rtlpriv = rtl_priv(hw);
531 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
533 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
535 while (skb_queue_len(&ring->queue)) {
536 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
537 struct sk_buff *skb;
538 struct ieee80211_tx_info *info;
539 __le16 fc;
540 u8 tid;
542 u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
543 HW_DESC_OWN);
546 *a beacon packet will only use the first
547 *descriptor by default, and the own bit may not
548 *be cleared by the hardware
550 if (own)
551 return;
552 ring->idx = (ring->idx + 1) % ring->entries;
554 skb = __skb_dequeue(&ring->queue);
555 pci_unmap_single(rtlpci->pdev,
556 rtlpriv->cfg->ops->
557 get_desc((u8 *) entry, true,
558 HW_DESC_TXBUFF_ADDR),
559 skb->len, PCI_DMA_TODEVICE);
561 /* remove early mode header */
562 if (rtlpriv->rtlhal.earlymode_enable)
563 skb_pull(skb, EM_HDR_LEN);
565 RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
566 ("new ring->idx:%d, "
567 "free: skb_queue_len:%d, free: seq:%x\n",
568 ring->idx,
569 skb_queue_len(&ring->queue),
570 *(u16 *) (skb->data + 22)));
572 if (prio == TXCMD_QUEUE) {
573 dev_kfree_skb(skb);
574 goto tx_status_ok;
578 /* for SW LPS, just after the NULL data frame is sent out, we can
579 * be sure the AP knows we are asleep, so we should not let
580 * the RF go to sleep */
581 fc = rtl_get_fc(skb);
582 if (ieee80211_is_nullfunc(fc)) {
583 if (ieee80211_has_pm(fc)) {
584 rtlpriv->mac80211.offchan_delay = true;
585 rtlpriv->psc.state_inap = 1;
586 } else {
587 rtlpriv->psc.state_inap = 0;
591 /* update tid tx pkt num */
592 tid = rtl_get_tid(skb);
593 if (tid <= 7)
594 rtlpriv->link_info.tidtx_inperiod[tid]++;
596 info = IEEE80211_SKB_CB(skb);
597 ieee80211_tx_info_clear_status(info);
599 info->flags |= IEEE80211_TX_STAT_ACK;
600 /*info->status.rates[0].count = 1; */
602 ieee80211_tx_status_irqsafe(hw, skb);
604 if ((ring->entries - skb_queue_len(&ring->queue))
605 == 2) {
607 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
608 ("more desc left, wake"
609 "skb_queue@%d,ring->idx = %d,"
610 "skb_queue_len = 0x%d\n",
611 prio, ring->idx,
612 skb_queue_len(&ring->queue)));
614 ieee80211_wake_queue(hw,
615 skb_get_queue_mapping
616 (skb));
618 tx_status_ok:
619 skb = NULL;
622 if (((rtlpriv->link_info.num_rx_inperiod +
623 rtlpriv->link_info.num_tx_inperiod) > 8) ||
624 (rtlpriv->link_info.num_rx_inperiod > 2)) {
625 tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
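Both the TX reclaim loop above and the RX path below rely on the descriptor OWN bit as the handshake with the DMA engine: the driver sets it when handing a descriptor to the hardware, and the hardware clears it when done. A condensed sketch of that convention using the same cfg->ops accessors (illustrative only):

	/* sketch: give a descriptor to the hardware ... */
	u8 one = 1;
	rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true, HW_DESC_OWN, (u8 *)&one);
	/* ... and later check whether the hardware has released it */
	if (rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN))
		return;		/* still owned by hardware, nothing to reclaim yet */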
629 static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
630 struct ieee80211_rx_status rx_status)
632 struct rtl_priv *rtlpriv = rtl_priv(hw);
633 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
634 __le16 fc = rtl_get_fc(skb);
635 bool unicast = false;
636 struct sk_buff *uskb = NULL;
637 u8 *pdata;
640 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
642 if (is_broadcast_ether_addr(hdr->addr1)) {
643 ;/*TODO*/
644 } else if (is_multicast_ether_addr(hdr->addr1)) {
645 ;/*TODO*/
646 } else {
647 unicast = true;
648 rtlpriv->stats.rxbytesunicast += skb->len;
651 rtl_is_special_data(hw, skb, false);
653 if (ieee80211_is_data(fc)) {
654 rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
656 if (unicast)
657 rtlpriv->link_info.num_rx_inperiod++;
660 /* for sw lps */
661 rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
662 rtl_recognize_peer(hw, (void *)skb->data, skb->len);
663 if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
664 (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) &&
665 (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)))
666 return;
668 if (unlikely(!rtl_action_proc(hw, skb, false)))
669 return;
671 uskb = dev_alloc_skb(skb->len + 128);
672 memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status));
673 pdata = (u8 *)skb_put(uskb, skb->len);
674 memcpy(pdata, skb->data, skb->len);
676 ieee80211_rx_irqsafe(hw, uskb);
679 static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
681 struct rtl_priv *rtlpriv = rtl_priv(hw);
682 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
683 int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;
685 struct ieee80211_rx_status rx_status = { 0 };
686 unsigned int count = rtlpci->rxringcount;
687 u8 own;
688 u8 tmp_one;
689 u32 bufferaddress;
691 struct rtl_stats stats = {
692 .signal = 0,
693 .noise = -98,
694 .rate = 0,
696 int index = rtlpci->rx_ring[rx_queue_idx].idx;
698 /*RX NORMAL PKT */
699 while (count--) {
700 /*rx descriptor */
701 struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
702 index];
703 /*rx pkt */
704 struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
705 index];
706 struct sk_buff *new_skb = NULL;
708 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
709 false, HW_DESC_OWN);
711 /*wait for the data to be filled in by hardware */
712 if (own)
713 break;
715 rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
716 &rx_status,
717 (u8 *) pdesc, skb);
719 if (stats.crc || stats.hwerror)
720 goto done;
722 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
723 if (unlikely(!new_skb)) {
724 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
725 DBG_DMESG,
726 ("can't alloc skb for rx\n"));
727 goto done;
730 pci_unmap_single(rtlpci->pdev,
731 *((dma_addr_t *) skb->cb),
732 rtlpci->rxbuffersize,
733 PCI_DMA_FROMDEVICE);
735 skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, false,
736 HW_DESC_RXPKT_LEN));
737 skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift);
740 * NOTICE: This must not be done here for mac80211;
741 * it is done in mac80211 code.
742 * If you do it here, DHCP over an encrypted link will fail:
743 * skb_trim(skb, skb->len - 4);
746 _rtl_receive_one(hw, skb, rx_status);
748 if (((rtlpriv->link_info.num_rx_inperiod +
749 rtlpriv->link_info.num_tx_inperiod) > 8) ||
750 (rtlpriv->link_info.num_rx_inperiod > 2)) {
751 tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
754 dev_kfree_skb_any(skb);
755 skb = new_skb;
757 rtlpci->rx_ring[rx_queue_idx].rx_buf[index] = skb;
758 *((dma_addr_t *) skb->cb) =
759 pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
760 rtlpci->rxbuffersize,
761 PCI_DMA_FROMDEVICE);
763 done:
764 bufferaddress = (*((dma_addr_t *)skb->cb));
765 tmp_one = 1;
766 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
767 HW_DESC_RXBUFF_ADDR,
768 (u8 *)&bufferaddress);
769 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
770 HW_DESC_RXPKT_LEN,
771 (u8 *)&rtlpci->rxbuffersize);
773 if (index == rtlpci->rxringcount - 1)
774 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
775 HW_DESC_RXERO,
776 (u8 *)&tmp_one);
778 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
779 (u8 *)&tmp_one);
781 index = (index + 1) % rtlpci->rxringcount;
784 rtlpci->rx_ring[rx_queue_idx].idx = index;
787 static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
789 struct ieee80211_hw *hw = dev_id;
790 struct rtl_priv *rtlpriv = rtl_priv(hw);
791 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
792 unsigned long flags;
793 u32 inta = 0;
794 u32 intb = 0;
796 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
798 /*read ISR: 4/8bytes */
799 rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
801 /*Shared IRQ or HW disappeared */
802 if (!inta || inta == 0xffff)
803 goto done;
805 /*<1> beacon related */
806 if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
807 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
808 ("beacon ok interrupt!\n"));
811 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
812 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
813 ("beacon err interrupt!\n"));
816 if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
817 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
818 ("beacon interrupt!\n"));
821 if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
822 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
823 ("prepare beacon for interrupt!\n"));
824 tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
827 /*<3> Tx related */
828 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
829 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("IMR_TXFOVW!\n"));
831 if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
832 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
833 ("Manage ok interrupt!\n"));
834 _rtl_pci_tx_isr(hw, MGNT_QUEUE);
837 if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
838 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
839 ("HIGH_QUEUE ok interrupt!\n"));
840 _rtl_pci_tx_isr(hw, HIGH_QUEUE);
843 if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
844 rtlpriv->link_info.num_tx_inperiod++;
846 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
847 ("BK Tx OK interrupt!\n"));
848 _rtl_pci_tx_isr(hw, BK_QUEUE);
851 if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
852 rtlpriv->link_info.num_tx_inperiod++;
854 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
855 ("BE TX OK interrupt!\n"));
856 _rtl_pci_tx_isr(hw, BE_QUEUE);
859 if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
860 rtlpriv->link_info.num_tx_inperiod++;
862 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
863 ("VI TX OK interrupt!\n"));
864 _rtl_pci_tx_isr(hw, VI_QUEUE);
867 if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
868 rtlpriv->link_info.num_tx_inperiod++;
870 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
871 ("Vo TX OK interrupt!\n"));
872 _rtl_pci_tx_isr(hw, VO_QUEUE);
875 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
876 if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
877 rtlpriv->link_info.num_tx_inperiod++;
879 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
880 ("CMD TX OK interrupt!\n"));
881 _rtl_pci_tx_isr(hw, TXCMD_QUEUE);
885 /*<2> Rx related */
886 if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
887 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
888 _rtl_pci_rx_interrupt(hw);
891 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
892 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
893 ("rx descriptor unavailable!\n"));
894 _rtl_pci_rx_interrupt(hw);
897 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
898 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
899 _rtl_pci_rx_interrupt(hw);
902 if (rtlpriv->rtlhal.earlymode_enable)
903 tasklet_schedule(&rtlpriv->works.irq_tasklet);
905 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
906 return IRQ_HANDLED;
908 done:
909 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
910 return IRQ_HANDLED;
913 static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
915 _rtl_pci_tx_chk_waitq(hw);
918 static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
920 rtl_lps_leave(hw);
923 static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
925 struct rtl_priv *rtlpriv = rtl_priv(hw);
926 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
927 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
928 struct rtl8192_tx_ring *ring = NULL;
929 struct ieee80211_hdr *hdr = NULL;
930 struct ieee80211_tx_info *info = NULL;
931 struct sk_buff *pskb = NULL;
932 struct rtl_tx_desc *pdesc = NULL;
933 struct rtl_tcb_desc tcb_desc;
934 u8 temp_one = 1;
936 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
937 ring = &rtlpci->tx_ring[BEACON_QUEUE];
938 pskb = __skb_dequeue(&ring->queue);
939 if (pskb)
940 kfree_skb(pskb);
942 /*NB: the beacon data buffer must be 32-bit aligned. */
943 pskb = ieee80211_beacon_get(hw, mac->vif);
944 if (pskb == NULL)
945 return;
946 hdr = rtl_get_hdr(pskb);
947 info = IEEE80211_SKB_CB(pskb);
948 pdesc = &ring->desc[0];
949 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
950 info, pskb, BEACON_QUEUE, &tcb_desc);
952 __skb_queue_tail(&ring->queue, pskb);
954 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
955 (u8 *)&temp_one);
957 return;
960 static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
962 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
963 u8 i;
965 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
966 rtlpci->txringcount[i] = RT_TXDESC_NUM;
969 *we just allocate 2 descriptors for the beacon queue,
970 *because we only need the first descriptor for the hw beacon.
972 rtlpci->txringcount[BEACON_QUEUE] = 2;
975 *The BE queue needs more descriptors for performance;
976 *otherwise we may run out of tx descriptors,
977 *which may cause a mac80211 memory leak.
979 rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
981 rtlpci->rxbuffersize = 9100; /*2048/1024; */
982 rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
985 static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
986 struct pci_dev *pdev)
988 struct rtl_priv *rtlpriv = rtl_priv(hw);
989 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
990 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
991 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
993 rtlpci->up_first_time = true;
994 rtlpci->being_init_adapter = false;
996 rtlhal->hw = hw;
997 rtlpci->pdev = pdev;
999 /*Tx/Rx related var */
1000 _rtl_pci_init_trx_var(hw);
1002 /*IBSS*/ mac->beacon_interval = 100;
1004 /*AMPDU*/
1005 mac->min_space_cfg = 0;
1006 mac->max_mss_density = 0;
1007 /*set sane AMPDU defaults */
1008 mac->current_ampdu_density = 7;
1009 mac->current_ampdu_factor = 3;
1011 /*QOS*/
1012 rtlpci->acm_method = eAcmWay2_SW;
1014 /*task */
1015 tasklet_init(&rtlpriv->works.irq_tasklet,
1016 (void (*)(unsigned long))_rtl_pci_irq_tasklet,
1017 (unsigned long)hw);
1018 tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
1019 (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
1020 (unsigned long)hw);
1021 tasklet_init(&rtlpriv->works.ips_leave_tasklet,
1022 (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
1023 (unsigned long)hw);
1026 static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
1027 unsigned int prio, unsigned int entries)
1029 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1030 struct rtl_priv *rtlpriv = rtl_priv(hw);
1031 struct rtl_tx_desc *ring;
1032 dma_addr_t dma;
1033 u32 nextdescaddress;
1034 int i;
1036 ring = pci_alloc_consistent(rtlpci->pdev,
1037 sizeof(*ring) * entries, &dma);
1039 if (!ring || (unsigned long)ring & 0xFF) {
1040 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1041 ("Cannot allocate TX ring (prio = %d)\n", prio));
1042 return -ENOMEM;
1045 memset(ring, 0, sizeof(*ring) * entries);
1046 rtlpci->tx_ring[prio].desc = ring;
1047 rtlpci->tx_ring[prio].dma = dma;
1048 rtlpci->tx_ring[prio].idx = 0;
1049 rtlpci->tx_ring[prio].entries = entries;
1050 skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
1052 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1053 ("queue:%d, ring_addr:%p\n", prio, ring));
1055 for (i = 0; i < entries; i++) {
1056 nextdescaddress = (u32) dma +
1057 ((i + 1) % entries) *
1058 sizeof(*ring);
1060 rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
1061 true, HW_DESC_TX_NEXTDESC_ADDR,
1062 (u8 *)&nextdescaddress);
1065 return 0;
1068 static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1070 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1071 struct rtl_priv *rtlpriv = rtl_priv(hw);
1072 struct rtl_rx_desc *entry = NULL;
1073 int i, rx_queue_idx;
1074 u8 tmp_one = 1;
1077 *rx_queue_idx 0:RX_MPDU_QUEUE
1078 *rx_queue_idx 1:RX_CMD_QUEUE
1080 for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1081 rx_queue_idx++) {
1082 rtlpci->rx_ring[rx_queue_idx].desc =
1083 pci_alloc_consistent(rtlpci->pdev,
1084 sizeof(*rtlpci->rx_ring[rx_queue_idx].
1085 desc) * rtlpci->rxringcount,
1086 &rtlpci->rx_ring[rx_queue_idx].dma);
1088 if (!rtlpci->rx_ring[rx_queue_idx].desc ||
1089 (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
1090 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1091 ("Cannot allocate RX ring\n"));
1092 return -ENOMEM;
1095 memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
1096 sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
1097 rtlpci->rxringcount);
1099 rtlpci->rx_ring[rx_queue_idx].idx = 0;
1101 /* If amsdu_8k is disabled, set buffersize to 4096. This
1102 * change will reduce memory fragmentation.
1104 if (rtlpci->rxbuffersize > 4096 &&
1105 rtlpriv->rtlhal.disable_amsdu_8k)
1106 rtlpci->rxbuffersize = 4096;
1108 for (i = 0; i < rtlpci->rxringcount; i++) {
1109 struct sk_buff *skb =
1110 dev_alloc_skb(rtlpci->rxbuffersize);
1111 u32 bufferaddress;
1112 if (!skb)
1113 return 0;
1114 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1116 /*skb->dev = dev; */
1118 rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;
1121 *just set skb->cb to mapping addr
1122 *for pci_unmap_single use
1124 *((dma_addr_t *) skb->cb) =
1125 pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
1126 rtlpci->rxbuffersize,
1127 PCI_DMA_FROMDEVICE);
1129 bufferaddress = (*((dma_addr_t *)skb->cb));
1130 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1131 HW_DESC_RXBUFF_ADDR,
1132 (u8 *)&bufferaddress);
1133 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1134 HW_DESC_RXPKT_LEN,
1135 (u8 *)&rtlpci->
1136 rxbuffersize);
1137 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1138 HW_DESC_RXOWN,
1139 (u8 *)&tmp_one);
1142 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1143 HW_DESC_RXERO, (u8 *)&tmp_one);
1145 return 0;
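The RX refill code above (and the identical pattern in _rtl_pci_rx_interrupt()) keeps the DMA handle in skb->cb so it can be handed back to pci_unmap_single() later. A condensed sketch of that bookkeeping, introducing no new names:

	/* sketch: remember the mapping at map time ... */
	*((dma_addr_t *)skb->cb) =
		pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
			       rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
	/* ... and read it back when the buffer is recycled or freed */
	pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
			 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);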
1148 static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1149 unsigned int prio)
1151 struct rtl_priv *rtlpriv = rtl_priv(hw);
1152 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1153 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
1155 while (skb_queue_len(&ring->queue)) {
1156 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
1157 struct sk_buff *skb = __skb_dequeue(&ring->queue);
1159 pci_unmap_single(rtlpci->pdev,
1160 rtlpriv->cfg->
1161 ops->get_desc((u8 *) entry, true,
1162 HW_DESC_TXBUFF_ADDR),
1163 skb->len, PCI_DMA_TODEVICE);
1164 kfree_skb(skb);
1165 ring->idx = (ring->idx + 1) % ring->entries;
1168 pci_free_consistent(rtlpci->pdev,
1169 sizeof(*ring->desc) * ring->entries,
1170 ring->desc, ring->dma);
1171 ring->desc = NULL;
1174 static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
1176 int i, rx_queue_idx;
1178 /*rx_queue_idx 0:RX_MPDU_QUEUE */
1179 /*rx_queue_idx 1:RX_CMD_QUEUE */
1180 for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1181 rx_queue_idx++) {
1182 for (i = 0; i < rtlpci->rxringcount; i++) {
1183 struct sk_buff *skb =
1184 rtlpci->rx_ring[rx_queue_idx].rx_buf[i];
1185 if (!skb)
1186 continue;
1188 pci_unmap_single(rtlpci->pdev,
1189 *((dma_addr_t *) skb->cb),
1190 rtlpci->rxbuffersize,
1191 PCI_DMA_FROMDEVICE);
1192 kfree_skb(skb);
1195 pci_free_consistent(rtlpci->pdev,
1196 sizeof(*rtlpci->rx_ring[rx_queue_idx].
1197 desc) * rtlpci->rxringcount,
1198 rtlpci->rx_ring[rx_queue_idx].desc,
1199 rtlpci->rx_ring[rx_queue_idx].dma);
1200 rtlpci->rx_ring[rx_queue_idx].desc = NULL;
1204 static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
1206 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1207 int ret;
1208 int i;
1210 ret = _rtl_pci_init_rx_ring(hw);
1211 if (ret)
1212 return ret;
1214 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1215 ret = _rtl_pci_init_tx_ring(hw, i,
1216 rtlpci->txringcount[i]);
1217 if (ret)
1218 goto err_free_rings;
1221 return 0;
1223 err_free_rings:
1224 _rtl_pci_free_rx_ring(rtlpci);
1226 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1227 if (rtlpci->tx_ring[i].desc)
1228 _rtl_pci_free_tx_ring(hw, i);
1230 return 1;
1233 static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
1235 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1236 u32 i;
1238 /*free rx rings */
1239 _rtl_pci_free_rx_ring(rtlpci);
1241 /*free tx rings */
1242 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1243 _rtl_pci_free_tx_ring(hw, i);
1245 return 0;
1248 int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1250 struct rtl_priv *rtlpriv = rtl_priv(hw);
1251 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1252 int i, rx_queue_idx;
1253 unsigned long flags;
1254 u8 tmp_one = 1;
1256 /*rx_queue_idx 0:RX_MPDU_QUEUE */
1257 /*rx_queue_idx 1:RX_CMD_QUEUE */
1258 for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1259 rx_queue_idx++) {
1261 *force the rx_ring[RX_MPDU_QUEUE/
1262 *RX_CMD_QUEUE].idx to the first one
1264 if (rtlpci->rx_ring[rx_queue_idx].desc) {
1265 struct rtl_rx_desc *entry = NULL;
1267 for (i = 0; i < rtlpci->rxringcount; i++) {
1268 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1269 rtlpriv->cfg->ops->set_desc((u8 *) entry,
1270 false,
1271 HW_DESC_RXOWN,
1272 (u8 *)&tmp_one);
1274 rtlpci->rx_ring[rx_queue_idx].idx = 0;
1279 *after reset, release previous pending packet,
1280 *and force the tx idx to the first one
1282 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1283 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1284 if (rtlpci->tx_ring[i].desc) {
1285 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
1287 while (skb_queue_len(&ring->queue)) {
1288 struct rtl_tx_desc *entry =
1289 &ring->desc[ring->idx];
1290 struct sk_buff *skb =
1291 __skb_dequeue(&ring->queue);
1293 pci_unmap_single(rtlpci->pdev,
1294 rtlpriv->cfg->ops->
1295 get_desc((u8 *)
1296 entry,
1297 true,
1298 HW_DESC_TXBUFF_ADDR),
1299 skb->len, PCI_DMA_TODEVICE);
1300 kfree_skb(skb);
1301 ring->idx = (ring->idx + 1) % ring->entries;
1303 ring->idx = 0;
1307 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1309 return 0;
1312 static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1313 struct sk_buff *skb)
1315 struct rtl_priv *rtlpriv = rtl_priv(hw);
1316 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1317 struct ieee80211_sta *sta = info->control.sta;
1318 struct rtl_sta_info *sta_entry = NULL;
1319 u8 tid = rtl_get_tid(skb);
1321 if (!sta)
1322 return false;
1323 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1325 if (!rtlpriv->rtlhal.earlymode_enable)
1326 return false;
1327 if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
1328 return false;
1329 if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
1330 return false;
1331 if (tid > 7)
1332 return false;
1334 /* maybe every tid should be checked */
1335 if (!rtlpriv->link_info.higher_busytxtraffic[tid])
1336 return false;
1338 spin_lock_bh(&rtlpriv->locks.waitq_lock);
1339 skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
1340 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
1342 return true;
1345 static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
1346 struct rtl_tcb_desc *ptcb_desc)
1348 struct rtl_priv *rtlpriv = rtl_priv(hw);
1349 struct rtl_sta_info *sta_entry = NULL;
1350 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1351 struct ieee80211_sta *sta = info->control.sta;
1352 struct rtl8192_tx_ring *ring;
1353 struct rtl_tx_desc *pdesc;
1354 u8 idx;
1355 u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
1356 unsigned long flags;
1357 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
1358 __le16 fc = rtl_get_fc(skb);
1359 u8 *pda_addr = hdr->addr1;
1360 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1361 /*ssn */
1362 u8 tid = 0;
1363 u16 seq_number = 0;
1364 u8 own;
1365 u8 temp_one = 1;
1367 if (ieee80211_is_auth(fc)) {
1368 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
1369 rtl_ips_nic_on(hw);
1372 if (rtlpriv->psc.sw_ps_enabled) {
1373 if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
1374 !ieee80211_has_pm(fc))
1375 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
1378 rtl_action_proc(hw, skb, true);
1380 if (is_multicast_ether_addr(pda_addr))
1381 rtlpriv->stats.txbytesmulticast += skb->len;
1382 else if (is_broadcast_ether_addr(pda_addr))
1383 rtlpriv->stats.txbytesbroadcast += skb->len;
1384 else
1385 rtlpriv->stats.txbytesunicast += skb->len;
1387 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1388 ring = &rtlpci->tx_ring[hw_queue];
1389 if (hw_queue != BEACON_QUEUE)
1390 idx = (ring->idx + skb_queue_len(&ring->queue)) %
1391 ring->entries;
1392 else
1393 idx = 0;
1395 pdesc = &ring->desc[idx];
1396 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
1397 true, HW_DESC_OWN);
1399 if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
1400 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1401 ("No more TX desc@%d, ring->idx = %d,"
1402 "idx = %d, skb_queue_len = 0x%d\n",
1403 hw_queue, ring->idx, idx,
1404 skb_queue_len(&ring->queue)));
1406 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1407 return skb->len;
1410 if (ieee80211_is_data_qos(fc)) {
1411 tid = rtl_get_tid(skb);
1412 if (sta) {
1413 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1414 seq_number = (le16_to_cpu(hdr->seq_ctrl) &
1415 IEEE80211_SCTL_SEQ) >> 4;
1416 seq_number += 1;
1418 if (!ieee80211_has_morefrags(hdr->frame_control))
1419 sta_entry->tids[tid].seq_number = seq_number;
1423 if (ieee80211_is_data(fc))
1424 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1426 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1427 info, skb, hw_queue, ptcb_desc);
1429 __skb_queue_tail(&ring->queue, skb);
1431 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
1432 HW_DESC_OWN, (u8 *)&temp_one);
1435 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
1436 hw_queue != BEACON_QUEUE) {
1438 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
1439 ("less desc left, stop skb_queue@%d, "
1440 "ring->idx = %d,"
1441 "idx = %d, skb_queue_len = 0x%d\n",
1442 hw_queue, ring->idx, idx,
1443 skb_queue_len(&ring->queue)));
1445 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
1448 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1450 rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
1452 return 0;
1455 static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
1457 struct rtl_priv *rtlpriv = rtl_priv(hw);
1458 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1459 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1460 u16 i = 0;
1461 int queue_id;
1462 struct rtl8192_tx_ring *ring;
1464 for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
1465 u32 queue_len;
1466 ring = &pcipriv->dev.tx_ring[queue_id];
1467 queue_len = skb_queue_len(&ring->queue);
1468 if (queue_len == 0 || queue_id == BEACON_QUEUE ||
1469 queue_id == TXCMD_QUEUE) {
1470 queue_id--;
1471 continue;
1472 } else {
1473 msleep(20);
1474 i++;
1477 /* we wait at most about 4 s (200 * 20 ms) for all queues to drain */
1478 if (rtlpriv->psc.rfpwr_state == ERFOFF ||
1479 is_hal_stop(rtlhal) || i >= 200)
1480 return;
1484 static void rtl_pci_deinit(struct ieee80211_hw *hw)
1486 struct rtl_priv *rtlpriv = rtl_priv(hw);
1487 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1489 _rtl_pci_deinit_trx_ring(hw);
1491 synchronize_irq(rtlpci->pdev->irq);
1492 tasklet_kill(&rtlpriv->works.irq_tasklet);
1493 tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
1495 flush_workqueue(rtlpriv->works.rtl_wq);
1496 destroy_workqueue(rtlpriv->works.rtl_wq);
1500 static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1502 struct rtl_priv *rtlpriv = rtl_priv(hw);
1503 int err;
1505 _rtl_pci_init_struct(hw, pdev);
1507 err = _rtl_pci_init_trx_ring(hw);
1508 if (err) {
1509 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1510 ("tx ring initialization failed"));
1511 return err;
1514 return 1;
1517 static int rtl_pci_start(struct ieee80211_hw *hw)
1519 struct rtl_priv *rtlpriv = rtl_priv(hw);
1520 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1521 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1522 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1524 int err;
1526 rtl_pci_reset_trx_ring(hw);
1528 rtlpci->driver_is_goingto_unload = false;
1529 err = rtlpriv->cfg->ops->hw_init(hw);
1530 if (err) {
1531 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1532 ("Failed to config hardware!\n"));
1533 return err;
1536 rtlpriv->cfg->ops->enable_interrupt(hw);
1537 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));
1539 rtl_init_rx_config(hw);
1541 /*should be done after adapter start and interrupt enable. */
1542 set_hal_start(rtlhal);
1544 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1546 rtlpci->up_first_time = false;
1548 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("OK\n"));
1549 return 0;
1552 static void rtl_pci_stop(struct ieee80211_hw *hw)
1554 struct rtl_priv *rtlpriv = rtl_priv(hw);
1555 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1556 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1557 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1558 unsigned long flags;
1559 u8 RFInProgressTimeOut = 0;
1562 *should be done before disabling the interrupt & adapter,
1563 *and will take effect immediately.
1565 set_hal_stop(rtlhal);
1567 rtlpriv->cfg->ops->disable_interrupt(hw);
1568 tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
1570 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1571 while (ppsc->rfchange_inprogress) {
1572 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1573 if (RFInProgressTimeOut > 100) {
1574 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1575 break;
1577 mdelay(1);
1578 RFInProgressTimeOut++;
1579 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1581 ppsc->rfchange_inprogress = true;
1582 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1584 rtlpci->driver_is_goingto_unload = true;
1585 rtlpriv->cfg->ops->hw_disable(hw);
1586 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1588 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1589 ppsc->rfchange_inprogress = false;
1590 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1592 rtl_pci_enable_aspm(hw);
1595 static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1596 struct ieee80211_hw *hw)
1598 struct rtl_priv *rtlpriv = rtl_priv(hw);
1599 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1600 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1601 struct pci_dev *bridge_pdev = pdev->bus->self;
1602 u16 venderid;
1603 u16 deviceid;
1604 u8 revisionid;
1605 u16 irqline;
1606 u8 tmp;
1608 pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
1609 venderid = pdev->vendor;
1610 deviceid = pdev->device;
1611 pci_read_config_byte(pdev, 0x8, &revisionid);
1612 pci_read_config_word(pdev, 0x3C, &irqline);
1614 /* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
1615 * r8192e_pci, and RTL8192SE, which uses this driver. If the
1616 * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
1617 * the correct driver is r8192e_pci, thus this routine should
1618 * return false.
1620 if (deviceid == RTL_PCI_8192SE_DID &&
1621 revisionid == RTL_PCI_REVISION_ID_8192PCIE)
1622 return false;
1624 if (deviceid == RTL_PCI_8192_DID ||
1625 deviceid == RTL_PCI_0044_DID ||
1626 deviceid == RTL_PCI_0047_DID ||
1627 deviceid == RTL_PCI_8192SE_DID ||
1628 deviceid == RTL_PCI_8174_DID ||
1629 deviceid == RTL_PCI_8173_DID ||
1630 deviceid == RTL_PCI_8172_DID ||
1631 deviceid == RTL_PCI_8171_DID) {
1632 switch (revisionid) {
1633 case RTL_PCI_REVISION_ID_8192PCIE:
1634 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1635 ("8192 PCI-E is found - "
1636 "vid/did=%x/%x\n", venderid, deviceid));
1637 rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
1638 break;
1639 case RTL_PCI_REVISION_ID_8192SE:
1640 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1641 ("8192SE is found - "
1642 "vid/did=%x/%x\n", venderid, deviceid));
1643 rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1644 break;
1645 default:
1646 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1647 ("Err: Unknown device - "
1648 "vid/did=%x/%x\n", venderid, deviceid));
1649 rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1650 break;
1653 } else if (deviceid == RTL_PCI_8192CET_DID ||
1654 deviceid == RTL_PCI_8192CE_DID ||
1655 deviceid == RTL_PCI_8191CE_DID ||
1656 deviceid == RTL_PCI_8188CE_DID) {
1657 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
1658 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1659 ("8192C PCI-E is found - "
1660 "vid/did=%x/%x\n", venderid, deviceid));
1661 } else if (deviceid == RTL_PCI_8192DE_DID ||
1662 deviceid == RTL_PCI_8192DE_DID2) {
1663 rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
1664 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1665 ("8192D PCI-E is found - "
1666 "vid/did=%x/%x\n", venderid, deviceid));
1667 } else {
1668 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1669 ("Err: Unknown device -"
1670 " vid/did=%x/%x\n", venderid, deviceid));
1672 rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
1675 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
1676 if (revisionid == 0 || revisionid == 1) {
1677 if (revisionid == 0) {
1678 RT_TRACE(rtlpriv, COMP_INIT,
1679 DBG_LOUD, ("Find 92DE MAC0.\n"));
1680 rtlhal->interfaceindex = 0;
1681 } else if (revisionid == 1) {
1682 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1683 ("Find 92DE MAC1.\n"));
1684 rtlhal->interfaceindex = 1;
1686 } else {
1687 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1688 ("Unknown device - "
1689 "VendorID/DeviceID=%x/%x, Revision=%x\n",
1690 venderid, deviceid, revisionid));
1691 rtlhal->interfaceindex = 0;
1694 /*find bus info */
1695 pcipriv->ndis_adapter.busnumber = pdev->bus->number;
1696 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
1697 pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
1699 if (bridge_pdev) {
1700 /*find bridge info if available */
1701 pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
1702 for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
1703 if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
1704 pcipriv->ndis_adapter.pcibridge_vendor = tmp;
1705 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1706 ("Pci Bridge Vendor is found index:"
1707 " %d\n", tmp));
1708 break;
1713 if (pcipriv->ndis_adapter.pcibridge_vendor !=
1714 PCI_BRIDGE_VENDOR_UNKNOWN) {
1715 pcipriv->ndis_adapter.pcibridge_busnum =
1716 bridge_pdev->bus->number;
1717 pcipriv->ndis_adapter.pcibridge_devnum =
1718 PCI_SLOT(bridge_pdev->devfn);
1719 pcipriv->ndis_adapter.pcibridge_funcnum =
1720 PCI_FUNC(bridge_pdev->devfn);
1721 pcipriv->ndis_adapter.pcicfg_addrport =
1722 (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
1723 (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
1724 (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
1725 pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
1726 pci_pcie_cap(bridge_pdev);
1727 pcipriv->ndis_adapter.num4bytes =
1728 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
1730 rtl_pci_get_linkcontrol_field(hw);
1732 if (pcipriv->ndis_adapter.pcibridge_vendor ==
1733 PCI_BRIDGE_VENDOR_AMD) {
1734 pcipriv->ndis_adapter.amd_l1_patch =
1735 rtl_pci_get_amd_l1_patch(hw);
1739 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1740 ("pcidev busnumber:devnumber:funcnumber:"
1741 "vendor:link_ctl %d:%d:%d:%x:%x\n",
1742 pcipriv->ndis_adapter.busnumber,
1743 pcipriv->ndis_adapter.devnumber,
1744 pcipriv->ndis_adapter.funcnumber,
1745 pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
1747 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1748 ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
1749 "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
1750 pcipriv->ndis_adapter.pcibridge_busnum,
1751 pcipriv->ndis_adapter.pcibridge_devnum,
1752 pcipriv->ndis_adapter.pcibridge_funcnum,
1753 pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
1754 pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
1755 pcipriv->ndis_adapter.pcibridge_linkctrlreg,
1756 pcipriv->ndis_adapter.amd_l1_patch));
1758 rtl_pci_parse_configuration(pdev, hw);
1760 return true;
1763 int __devinit rtl_pci_probe(struct pci_dev *pdev,
1764 const struct pci_device_id *id)
1766 struct ieee80211_hw *hw = NULL;
1768 struct rtl_priv *rtlpriv = NULL;
1769 struct rtl_pci_priv *pcipriv = NULL;
1770 struct rtl_pci *rtlpci;
1771 unsigned long pmem_start, pmem_len, pmem_flags;
1772 int err;
1774 err = pci_enable_device(pdev);
1775 if (err) {
1776 RT_ASSERT(false,
1777 ("%s : Cannot enable new PCI device\n",
1778 pci_name(pdev)));
1779 return err;
1782 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
1783 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1784 RT_ASSERT(false, ("Unable to obtain 32bit DMA "
1785 "for consistent allocations\n"));
1786 pci_disable_device(pdev);
1787 return -ENOMEM;
1791 pci_set_master(pdev);
1793 hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
1794 sizeof(struct rtl_priv), &rtl_ops);
1795 if (!hw) {
1796 RT_ASSERT(false,
1797 ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
1798 err = -ENOMEM;
1799 goto fail1;
1802 SET_IEEE80211_DEV(hw, &pdev->dev);
1803 pci_set_drvdata(pdev, hw);
1805 rtlpriv = hw->priv;
1806 pcipriv = (void *)rtlpriv->priv;
1807 pcipriv->dev.pdev = pdev;
1809 /* init cfg & intf_ops */
1810 rtlpriv->rtlhal.interface = INTF_PCI;
1811 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
1812 rtlpriv->intf_ops = &rtl_pci_ops;
1815 *init dbgp flags before all
1816 *other functions, because we will
1817 *use them in other functions such as
1818 *RT_TRACE/RT_PRINT/RTL_PRINT_DATA;
1819 *you cannot use these macros
1820 *before this point
1822 rtl_dbgp_flag_init(hw);
1824 /* MEM map */
1825 err = pci_request_regions(pdev, KBUILD_MODNAME);
1826 if (err) {
1827 RT_ASSERT(false, ("Can't obtain PCI resources\n"));
1828 return err;
1831 pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
1832 pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
1833 pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
1835 /*shared mem start */
1836 rtlpriv->io.pci_mem_start =
1837 (unsigned long)pci_iomap(pdev,
1838 rtlpriv->cfg->bar_id, pmem_len);
1839 if (rtlpriv->io.pci_mem_start == 0) {
1840 RT_ASSERT(false, ("Can't map PCI mem\n"));
1841 goto fail2;
1844 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1845 ("mem mapped space: start: 0x%08lx len:%08lx "
1846 "flags:%08lx, after map:0x%08lx\n",
1847 pmem_start, pmem_len, pmem_flags,
1848 rtlpriv->io.pci_mem_start));
1850 /* Disable Clk Request */
1851 pci_write_config_byte(pdev, 0x81, 0);
1852 /* leave D3 mode */
1853 pci_write_config_byte(pdev, 0x44, 0);
1854 pci_write_config_byte(pdev, 0x04, 0x06);
1855 pci_write_config_byte(pdev, 0x04, 0x07);
1857 /* find adapter */
1858 if (!_rtl_pci_find_adapter(pdev, hw))
1859 goto fail3;
1861 /* Init IO handler */
1862 _rtl_pci_io_handler_init(&pdev->dev, hw);
1864 /*like read eeprom and so on */
1865 rtlpriv->cfg->ops->read_eeprom_info(hw);
1867 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
1868 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1869 ("Can't init_sw_vars.\n"));
1870 goto fail3;
1873 rtlpriv->cfg->ops->init_sw_leds(hw);
1875 /*aspm */
1876 rtl_pci_init_aspm(hw);
1878 /* Init mac80211 sw */
1879 err = rtl_init_core(hw);
1880 if (err) {
1881 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1882 ("Can't allocate sw for mac80211.\n"));
1883 goto fail3;
1886 /* Init PCI sw */
1887 err = !rtl_pci_init(hw, pdev);
1888 if (err) {
1889 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1890 ("Failed to init PCI.\n"));
1891 goto fail3;
1894 err = ieee80211_register_hw(hw);
1895 if (err) {
1896 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1897 ("Can't register mac80211 hw.\n"));
1898 goto fail3;
1899 } else {
1900 rtlpriv->mac80211.mac80211_registered = 1;
1903 err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
1904 if (err) {
1905 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1906 ("failed to create sysfs device attributes\n"));
1907 goto fail3;
1910 /*init rfkill */
1911 rtl_init_rfkill(hw);
1913 rtlpci = rtl_pcidev(pcipriv);
1914 err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
1915 IRQF_SHARED, KBUILD_MODNAME, hw);
1916 if (err) {
1917 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1918 ("%s: failed to register IRQ handler\n",
1919 wiphy_name(hw->wiphy)));
1920 goto fail3;
1921 } else {
1922 rtlpci->irq_alloc = 1;
1925 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
1926 return 0;
1928 fail3:
1929 pci_set_drvdata(pdev, NULL);
1930 rtl_deinit_core(hw);
1931 _rtl_pci_io_handler_release(hw);
1932 ieee80211_free_hw(hw);
1934 if (rtlpriv->io.pci_mem_start != 0)
1935 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
1937 fail2:
1938 pci_release_regions(pdev);
1940 fail1:
1942 pci_disable_device(pdev);
1944 return -ENODEV;
1947 EXPORT_SYMBOL(rtl_pci_probe);
1949 void rtl_pci_disconnect(struct pci_dev *pdev)
1951 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1952 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1953 struct rtl_priv *rtlpriv = rtl_priv(hw);
1954 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
1955 struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
1957 clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
1959 sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
1961 /*ieee80211_unregister_hw will call ops_stop */
1962 if (rtlmac->mac80211_registered == 1) {
1963 ieee80211_unregister_hw(hw);
1964 rtlmac->mac80211_registered = 0;
1965 } else {
1966 rtl_deinit_deferred_work(hw);
1967 rtlpriv->intf_ops->adapter_stop(hw);
1970 /*deinit rfkill */
1971 rtl_deinit_rfkill(hw);
1973 rtl_pci_deinit(hw);
1974 rtl_deinit_core(hw);
1975 _rtl_pci_io_handler_release(hw);
1976 rtlpriv->cfg->ops->deinit_sw_vars(hw);
1978 if (rtlpci->irq_alloc) {
1979 free_irq(rtlpci->pdev->irq, hw);
1980 rtlpci->irq_alloc = 0;
1983 if (rtlpriv->io.pci_mem_start != 0) {
1984 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
1985 pci_release_regions(pdev);
1988 pci_disable_device(pdev);
1990 rtl_pci_disable_aspm(hw);
1992 pci_set_drvdata(pdev, NULL);
1994 ieee80211_free_hw(hw);
1996 EXPORT_SYMBOL(rtl_pci_disconnect);
1998 /***************************************
1999 kernel pci power state define:
2000 PCI_D0 ((pci_power_t __force) 0)
2001 PCI_D1 ((pci_power_t __force) 1)
2002 PCI_D2 ((pci_power_t __force) 2)
2003 PCI_D3hot ((pci_power_t __force) 3)
2004 PCI_D3cold ((pci_power_t __force) 4)
2005 PCI_UNKNOWN ((pci_power_t __force) 5)
2007 This function is called when the system
2008 goes into suspend state. mac80211 will
2009 call rtl_mac_stop() from the mac80211
2010 suspend function first, so there is
2011 no need to call hw_disable here.
2012 ****************************************/
2013 int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2015 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2016 struct rtl_priv *rtlpriv = rtl_priv(hw);
2018 rtlpriv->cfg->ops->hw_suspend(hw);
2019 rtl_deinit_rfkill(hw);
2021 pci_save_state(pdev);
2022 pci_disable_device(pdev);
2023 pci_set_power_state(pdev, PCI_D3hot);
2024 return 0;
2026 EXPORT_SYMBOL(rtl_pci_suspend);
2028 int rtl_pci_resume(struct pci_dev *pdev)
2030 int ret;
2031 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2032 struct rtl_priv *rtlpriv = rtl_priv(hw);
2034 pci_set_power_state(pdev, PCI_D0);
2035 ret = pci_enable_device(pdev);
2036 if (ret) {
2037 RT_ASSERT(false, ("ERR: <======\n"));
2038 return ret;
2041 pci_restore_state(pdev);
2043 rtlpriv->cfg->ops->hw_resume(hw);
2044 rtl_init_rfkill(hw);
2045 return 0;
2047 EXPORT_SYMBOL(rtl_pci_resume);
2049 struct rtl_intf_ops rtl_pci_ops = {
2050 .read_efuse_byte = read_efuse_byte,
2051 .adapter_start = rtl_pci_start,
2052 .adapter_stop = rtl_pci_stop,
2053 .adapter_tx = rtl_pci_tx,
2054 .flush = rtl_pci_flush,
2055 .reset_trx_ring = rtl_pci_reset_trx_ring,
2056 .waitq_insert = rtl_pci_tx_chk_waitq_insert,
2058 .disable_aspm = rtl_pci_disable_aspm,
2059 .enable_aspm = rtl_pci_enable_aspm,