PageRenderTime 38ms CodeModel.GetById 13ms RepoModel.GetById 0ms app.codeStats 0ms

/drivers/net/wireless/intersil/p54/p54pci.c

http://github.com/mirrors/linux-2.6
C | 701 lines | 550 code | 129 blank | 22 comment | 55 complexity | 06dd1386dffdd2ebc27886d58cb7569f MD5 | raw file
License: GPL-2.0-only (per the SPDX identifier in the file header below)
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Linux device driver for PCI based Prism54
  4. *
  5. * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
  6. * Copyright (c) 2008, Christian Lamparter <chunkeey@web.de>
  7. *
  8. * Based on the islsm (softmac prism54) driver, which is:
  9. * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al.
  10. */
  11. #include <linux/pci.h>
  12. #include <linux/slab.h>
  13. #include <linux/firmware.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/delay.h>
  16. #include <linux/completion.h>
  17. #include <linux/module.h>
  18. #include <net/mac80211.h>
  19. #include "p54.h"
  20. #include "lmac.h"
  21. #include "p54pci.h"
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_DESCRIPTION("Prism54 PCI wireless driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");

/* PCI vendor/device IDs this driver binds to. */
static const struct pci_device_id p54p_table[] = {
	/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3890) },
	/* 3COM 3CRWE154G72 Wireless LAN adapter */
	{ PCI_DEVICE(0x10b7, 0x6001) },
	/* Intersil PRISM Indigo Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3877) },
	/* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3886) },
	/* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
	{ PCI_DEVICE(0x1260, 0xffff) },
	{ },	/* terminator */
};
MODULE_DEVICE_TABLE(pci, p54p_table);
  41. static int p54p_upload_firmware(struct ieee80211_hw *dev)
  42. {
  43. struct p54p_priv *priv = dev->priv;
  44. __le32 reg;
  45. int err;
  46. __le32 *data;
  47. u32 remains, left, device_addr;
  48. P54P_WRITE(int_enable, cpu_to_le32(0));
  49. P54P_READ(int_enable);
  50. udelay(10);
  51. reg = P54P_READ(ctrl_stat);
  52. reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
  53. reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT);
  54. P54P_WRITE(ctrl_stat, reg);
  55. P54P_READ(ctrl_stat);
  56. udelay(10);
  57. reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
  58. P54P_WRITE(ctrl_stat, reg);
  59. wmb();
  60. udelay(10);
  61. reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
  62. P54P_WRITE(ctrl_stat, reg);
  63. wmb();
  64. /* wait for the firmware to reset properly */
  65. mdelay(10);
  66. err = p54_parse_firmware(dev, priv->firmware);
  67. if (err)
  68. return err;
  69. if (priv->common.fw_interface != FW_LM86) {
  70. dev_err(&priv->pdev->dev, "wrong firmware, "
  71. "please get a LM86(PCI) firmware a try again.\n");
  72. return -EINVAL;
  73. }
  74. data = (__le32 *) priv->firmware->data;
  75. remains = priv->firmware->size;
  76. device_addr = ISL38XX_DEV_FIRMWARE_ADDR;
  77. while (remains) {
  78. u32 i = 0;
  79. left = min((u32)0x1000, remains);
  80. P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr));
  81. P54P_READ(int_enable);
  82. device_addr += 0x1000;
  83. while (i < left) {
  84. P54P_WRITE(direct_mem_win[i], *data++);
  85. i += sizeof(u32);
  86. }
  87. remains -= left;
  88. P54P_READ(int_enable);
  89. }
  90. reg = P54P_READ(ctrl_stat);
  91. reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN);
  92. reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
  93. reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT);
  94. P54P_WRITE(ctrl_stat, reg);
  95. P54P_READ(ctrl_stat);
  96. udelay(10);
  97. reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
  98. P54P_WRITE(ctrl_stat, reg);
  99. wmb();
  100. udelay(10);
  101. reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
  102. P54P_WRITE(ctrl_stat, reg);
  103. wmb();
  104. udelay(10);
  105. /* wait for the firmware to boot properly */
  106. mdelay(100);
  107. return 0;
  108. }
/*
 * Top up an RX ring with freshly allocated, DMA-mapped skbs.
 *
 * @ring_index: index into ring_control->host_idx/device_idx
 * @ring:       the descriptor array for this ring
 * @ring_limit: number of descriptors in @ring
 * @rx_buf:     shadow array mapping descriptor slot -> skb
 * @index:      caller's last-seen device index, used to compute how
 *              many slots are free
 *
 * Always leaves at least one slot empty (limit-- > 1) so the ring can
 * never appear completely full/empty ambiguously.  On allocation or
 * mapping failure it simply stops refilling; remaining slots stay empty.
 */
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf, u32 index)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	u32 limit, idx, i;

	idx = le32_to_cpu(ring_control->host_idx[ring_index]);
	/* number of free slots = ring size - (host idx - device idx) */
	limit = idx;
	limit -= index;
	limit = ring_limit - limit;

	i = idx % ring_limit;
	while (limit-- > 1) {
		struct p54p_desc *desc = &ring[i];

		/* Only fill slots the hardware is not currently using. */
		if (!desc->host_addr) {
			struct sk_buff *skb;
			dma_addr_t mapping;
			skb = dev_alloc_skb(priv->common.rx_mtu + 32);
			if (!skb)
				break;

			mapping = pci_map_single(priv->pdev,
						 skb_tail_pointer(skb),
						 priv->common.rx_mtu + 32,
						 PCI_DMA_FROMDEVICE);

			if (pci_dma_mapping_error(priv->pdev, mapping)) {
				dev_kfree_skb_any(skb);
				dev_err(&priv->pdev->dev,
					"RX DMA Mapping error\n");
				break;
			}

			desc->host_addr = cpu_to_le32(mapping);
			desc->device_addr = 0;	// FIXME: necessary?
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
			desc->flags = 0;
			rx_buf[i] = skb;
		}

		i++;
		idx++;
		i %= ring_limit;
	}

	/* Descriptors must be visible before the index that publishes them. */
	wmb();
	ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}
/*
 * Drain completed RX descriptors from a ring and hand the frames to
 * the p54 core, then refill the ring.
 *
 * @index is the caller's persistent position in the ring; it is updated
 * to the device's current index.  Walks from the old position up to the
 * device index, passing each skb to p54_rx().  If p54_rx() consumed the
 * skb (non-zero return) the slot is unmapped and cleared; otherwise the
 * same buffer is resynced and recycled in place.
 */
static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;

	while (i != idx) {
		u16 len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		desc = &ring[i];
		len = le16_to_cpu(desc->len);
		skb = rx_buf[i];

		/* Slot may be empty if a previous refill failed. */
		if (!skb) {
			i++;
			i %= ring_limit;
			continue;
		}

		/* Clamp device-reported lengths to the buffer size. */
		if (unlikely(len > priv->common.rx_mtu)) {
			if (net_ratelimit())
				dev_err(&priv->pdev->dev, "rx'd frame size "
					"exceeds length threshold.\n");
			len = priv->common.rx_mtu;
		}
		dma_addr = le32_to_cpu(desc->host_addr);
		pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
			priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
		skb_put(skb, len);

		if (p54_rx(dev, skb)) {
			/* skb consumed by the stack: release the mapping
			 * and mark the slot free for refill. */
			pci_unmap_single(priv->pdev, dma_addr,
				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
			rx_buf[i] = NULL;
			desc->host_addr = cpu_to_le32(0);
		} else {
			/* Frame rejected: reuse the same buffer. */
			skb_trim(skb, 0);
			pci_dma_sync_single_for_device(priv->pdev, dma_addr,
				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
		}

		i++;
		i %= ring_limit;
	}

	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}
/*
 * Reap completed TX descriptors: unmap each finished buffer, clear the
 * descriptor, and free the skb when the p54 core allows it.
 *
 * @index is the caller's persistent ring position and is advanced to the
 * device's current index.  FREE_AFTER_TX() decides whether the core
 * still needs the skb (e.g. for status reporting) or it can be freed
 * here.
 */
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **tx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	struct sk_buff *skb;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;

	while (i != idx) {
		desc = &ring[i];

		skb = tx_buf[i];
		tx_buf[i] = NULL;

		pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
				 le16_to_cpu(desc->len), PCI_DMA_TODEVICE);

		desc->host_addr = 0;
		desc->device_addr = 0;
		desc->len = 0;
		desc->flags = 0;

		if (skb && FREE_AFTER_TX(skb))
			p54_free_skb(dev, skb);

		i++;
		i %= ring_limit;
	}
}
/*
 * Bottom half: service all four rings (TX mgmt, TX data, RX mgmt,
 * RX data) and then kick the device to re-scan the ring control block.
 * Scheduled from p54p_interrupt() on ISL38XX_INT_IDENT_UPDATE.
 */
static void p54p_tasklet(unsigned long dev_id)
{
	struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;

	p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
			   ARRAY_SIZE(ring_control->tx_mgmt),
			   priv->tx_buf_mgmt);

	p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
			   ARRAY_SIZE(ring_control->tx_data),
			   priv->tx_buf_data);

	p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
			   ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);

	p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data,
			   ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data);

	/* All ring updates must be visible before notifying the device. */
	wmb();
	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
}
/*
 * Shared IRQ handler.  Acks the interrupt, then either schedules the
 * ring-servicing tasklet (UPDATE) or signals firmware-boot completion
 * (INIT).  Returns IRQ_NONE when the interrupt was not ours.
 */
static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *dev = dev_id;
	struct p54p_priv *priv = dev->priv;
	__le32 reg;

	reg = P54P_READ(int_ident);
	/* All-ones usually means the device is gone (e.g. hot-unplug). */
	if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
		goto out;
	}
	P54P_WRITE(int_ack, reg);

	/* Only react to sources we actually enabled. */
	reg &= P54P_READ(int_enable);

	if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
		tasklet_schedule(&priv->tasklet);
	else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
		complete(&priv->boot_comp);

out:
	return reg ? IRQ_HANDLED : IRQ_NONE;
}
  265. static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
  266. {
  267. unsigned long flags;
  268. struct p54p_priv *priv = dev->priv;
  269. struct p54p_ring_control *ring_control = priv->ring_control;
  270. struct p54p_desc *desc;
  271. dma_addr_t mapping;
  272. u32 idx, i;
  273. spin_lock_irqsave(&priv->lock, flags);
  274. idx = le32_to_cpu(ring_control->host_idx[1]);
  275. i = idx % ARRAY_SIZE(ring_control->tx_data);
  276. mapping = pci_map_single(priv->pdev, skb->data, skb->len,
  277. PCI_DMA_TODEVICE);
  278. if (pci_dma_mapping_error(priv->pdev, mapping)) {
  279. spin_unlock_irqrestore(&priv->lock, flags);
  280. p54_free_skb(dev, skb);
  281. dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
  282. return ;
  283. }
  284. priv->tx_buf_data[i] = skb;
  285. desc = &ring_control->tx_data[i];
  286. desc->host_addr = cpu_to_le32(mapping);
  287. desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
  288. desc->len = cpu_to_le16(skb->len);
  289. desc->flags = 0;
  290. wmb();
  291. ring_control->host_idx[1] = cpu_to_le32(idx + 1);
  292. spin_unlock_irqrestore(&priv->lock, flags);
  293. P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
  294. P54P_READ(dev_int);
  295. }
/*
 * Stop the device: mask interrupts, release the IRQ, kill the tasklet,
 * reset the chip, then unmap and free every buffer still held by the
 * four rings, and finally clear the shared ring control block.
 *
 * Must not run concurrently with the IRQ handler or tasklet; the
 * free_irq()/tasklet_kill() calls at the top guarantee that before the
 * rings are torn down.
 */
static void p54p_stop(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	unsigned int i;
	struct p54p_desc *desc;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);	/* flush posted write */
	udelay(10);

	free_irq(priv->pdev->irq, dev);

	tasklet_kill(&priv->tasklet);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));

	/* RX data ring: unmap and free any skbs still queued. */
	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
		desc = &ring_control->rx_data[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 PCI_DMA_FROMDEVICE);
		kfree_skb(priv->rx_buf_data[i]);
		priv->rx_buf_data[i] = NULL;
	}

	/* RX management ring. */
	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
		desc = &ring_control->rx_mgmt[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 PCI_DMA_FROMDEVICE);
		kfree_skb(priv->rx_buf_mgmt[i]);
		priv->rx_buf_mgmt[i] = NULL;
	}

	/* TX data ring: return unfinished frames to the p54 core. */
	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
		desc = &ring_control->tx_data[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 PCI_DMA_TODEVICE);

		p54_free_skb(dev, priv->tx_buf_data[i]);
		priv->tx_buf_data[i] = NULL;
	}

	/* TX management ring. */
	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
		desc = &ring_control->tx_mgmt[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 PCI_DMA_TODEVICE);

		p54_free_skb(dev, priv->tx_buf_mgmt[i]);
		priv->tx_buf_mgmt[i] = NULL;
	}

	memset(ring_control, 0, sizeof(*ring_control));
}
/*
 * Bring the device up: grab the IRQ, upload the firmware, prime both RX
 * rings, hand the ring control DMA address to the chip, and wait (up to
 * 1s) for the firmware's INIT interrupt before enabling normal UPDATE
 * interrupts.
 *
 * Returns 0 on success; -ETIMEDOUT if the firmware never signals boot,
 * -ERESTARTSYS if the wait was interrupted, or the error from IRQ
 * registration / firmware upload.
 */
static int p54p_open(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	int err;
	long timeout;

	/* Must be ready before the IRQ handler can complete() it. */
	init_completion(&priv->boot_comp);
	err = request_irq(priv->pdev->irq, p54p_interrupt,
			  IRQF_SHARED, "p54pci", dev);
	if (err) {
		dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
		return err;
	}

	memset(priv->ring_control, 0, sizeof(*priv->ring_control));
	err = p54p_upload_firmware(dev);
	if (err) {
		free_irq(priv->pdev->irq, dev);
		return err;
	}
	priv->rx_idx_data = priv->tx_idx_data = 0;
	priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;

	p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
		ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);

	p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
		ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);

	/* Tell the device where the shared ring control block lives. */
	P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
	P54P_READ(ring_control_base);	/* flush posted write */
	wmb();
	udelay(10);

	/* Only the INIT interrupt is enabled until the firmware boots. */
	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
	P54P_READ(dev_int);

	timeout = wait_for_completion_interruptible_timeout(
			&priv->boot_comp, HZ);
	if (timeout <= 0) {
		wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
		p54p_stop(dev);
		return timeout ? -ERESTARTSYS : -ETIMEDOUT;
	}

	/* Firmware is up: switch over to ring-update interrupts. */
	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
	wmb();
	udelay(10);

	return 0;
}
/*
 * Async firmware-load callback (second half of probe).  Runs once
 * request_firmware_nowait() has either found "isl3886pci" or given up.
 *
 * Boots the device once to read the EEPROM, stops it again, then
 * registers with mac80211.  On any failure the driver detaches itself
 * via device_release_driver(), which indirectly invokes p54p_remove()
 * to free everything.  Always completes priv->fw_loaded (p54p_remove
 * waits on it) and drops the pdev reference taken in p54p_probe().
 */
static void p54p_firmware_step2(const struct firmware *fw,
				void *context)
{
	struct p54p_priv *priv = context;
	struct ieee80211_hw *dev = priv->common.hw;
	struct pci_dev *pdev = priv->pdev;
	int err;

	if (!fw) {
		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
		err = -ENOENT;
		goto out;
	}

	priv->firmware = fw;

	err = p54p_open(dev);
	if (err)
		goto out;
	err = p54_read_eeprom(dev);
	p54p_stop(dev);
	if (err)
		goto out;

	err = p54_register_common(dev, &pdev->dev);
	if (err)
		goto out;

out:

	complete(&priv->fw_loaded);

	if (err) {
		struct device *parent = pdev->dev.parent;

		if (parent)
			device_lock(parent);

		/*
		 * This will indirectly result in a call to p54p_remove.
		 * Hence, we don't need to bother with freeing any
		 * allocated ressources at all.
		 */
		device_release_driver(&pdev->dev);

		if (parent)
			device_unlock(parent);
	}

	pci_dev_put(pdev);
}
/*
 * PCI probe: enable the device, map BAR 0, allocate the coherent ring
 * control block, wire up the p54 common callbacks, and kick off the
 * asynchronous firmware request.  Actual device bring-up continues in
 * p54p_firmware_step2() once the firmware arrives.
 *
 * Uses the standard goto-chain so each failure path unwinds exactly
 * the resources acquired before it.  The pci_dev reference taken here
 * is dropped either on the error path below or in
 * p54p_firmware_step2().
 */
static int p54p_probe(struct pci_dev *pdev,
		      const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	pci_dev_get(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		goto err_put;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	/* BAR 0 must be large enough to hold the full register file. */
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	/* Device only does 32-bit DMA. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* Clear latency timer / cache line size quirk registers. */
	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	init_completion(&priv->fw_loaded);
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	/* Coherent allocation: shared between CPU and device. */
	priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
						  &priv->ring_control_dma);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);

	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				      &priv->pdev->dev, GFP_KERNEL,
				      priv, p54p_firmware_step2);
	if (!err)
		return 0;

	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);

err_iounmap:
	iounmap(priv->map);

err_free_dev:
	p54_free_common(dev);

err_free_reg:
	pci_release_regions(pdev);
err_disable_dev:
	pci_disable_device(pdev);
err_put:
	pci_dev_put(pdev);
	return err;
}
/*
 * PCI remove: tear everything down in the reverse order of probe.
 * Waits for the async firmware callback to finish first so it cannot
 * race with this teardown (fw_loaded is completed unconditionally in
 * p54p_firmware_step2()).
 */
static void p54p_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	struct p54p_priv *priv;

	if (!dev)
		return;

	priv = dev->priv;
	wait_for_completion(&priv->fw_loaded);
	p54_unregister_common(dev);
	release_firmware(priv->firmware);
	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);
	iounmap(priv->map);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	p54_free_common(dev);
}
  542. #ifdef CONFIG_PM_SLEEP
  543. static int p54p_suspend(struct device *device)
  544. {
  545. struct pci_dev *pdev = to_pci_dev(device);
  546. pci_save_state(pdev);
  547. pci_set_power_state(pdev, PCI_D3hot);
  548. pci_disable_device(pdev);
  549. return 0;
  550. }
  551. static int p54p_resume(struct device *device)
  552. {
  553. struct pci_dev *pdev = to_pci_dev(device);
  554. int err;
  555. err = pci_reenable_device(pdev);
  556. if (err)
  557. return err;
  558. return pci_set_power_state(pdev, PCI_D0);
  559. }
  560. static SIMPLE_DEV_PM_OPS(p54pci_pm_ops, p54p_suspend, p54p_resume);
  561. #define P54P_PM_OPS (&p54pci_pm_ops)
  562. #else
  563. #define P54P_PM_OPS (NULL)
  564. #endif /* CONFIG_PM_SLEEP */
/* Glue between the PCI core and this driver. */
static struct pci_driver p54p_driver = {
	.name		= "p54pci",
	.id_table	= p54p_table,
	.probe		= p54p_probe,
	.remove		= p54p_remove,
	.driver.pm	= P54P_PM_OPS,	/* NULL unless CONFIG_PM_SLEEP */
};

module_pci_driver(p54p_driver);