/sys/dev/nfe/if_nfe.c

https://bitbucket.org/freebsd/freebsd-head/

  1. /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */
  2. /*-
  3. * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
  4. * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
  5. * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
  6. *
  7. * Permission to use, copy, modify, and distribute this software for any
  8. * purpose with or without fee is hereby granted, provided that the above
  9. * copyright notice and this permission notice appear in all copies.
  10. *
  11. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  12. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  13. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  14. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  15. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  16. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  17. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
  20. #include <sys/cdefs.h>
  21. __FBSDID("$FreeBSD$");
  22. #ifdef HAVE_KERNEL_OPTION_HEADERS
  23. #include "opt_device_polling.h"
  24. #endif
  25. #include <sys/param.h>
  26. #include <sys/endian.h>
  27. #include <sys/systm.h>
  28. #include <sys/sockio.h>
  29. #include <sys/mbuf.h>
  30. #include <sys/malloc.h>
  31. #include <sys/module.h>
  32. #include <sys/kernel.h>
  33. #include <sys/queue.h>
  34. #include <sys/socket.h>
  35. #include <sys/sysctl.h>
  36. #include <sys/taskqueue.h>
  37. #include <net/if.h>
  38. #include <net/if_arp.h>
  39. #include <net/ethernet.h>
  40. #include <net/if_dl.h>
  41. #include <net/if_media.h>
  42. #include <net/if_types.h>
  43. #include <net/if_vlan_var.h>
  44. #include <net/bpf.h>
  45. #include <machine/bus.h>
  46. #include <machine/resource.h>
  47. #include <sys/bus.h>
  48. #include <sys/rman.h>
  49. #include <dev/mii/mii.h>
  50. #include <dev/mii/miivar.h>
  51. #include <dev/pci/pcireg.h>
  52. #include <dev/pci/pcivar.h>
  53. #include <dev/nfe/if_nfereg.h>
  54. #include <dev/nfe/if_nfevar.h>
  55. MODULE_DEPEND(nfe, pci, 1, 1, 1);
  56. MODULE_DEPEND(nfe, ether, 1, 1, 1);
  57. MODULE_DEPEND(nfe, miibus, 1, 1, 1);
  58. /* "device miibus" required. See GENERIC if you get errors here. */
  59. #include "miibus_if.h"
  60. static int nfe_probe(device_t);
  61. static int nfe_attach(device_t);
  62. static int nfe_detach(device_t);
  63. static int nfe_suspend(device_t);
  64. static int nfe_resume(device_t);
  65. static int nfe_shutdown(device_t);
  66. static int nfe_can_use_msix(struct nfe_softc *);
  67. static void nfe_power(struct nfe_softc *);
  68. static int nfe_miibus_readreg(device_t, int, int);
  69. static int nfe_miibus_writereg(device_t, int, int, int);
  70. static void nfe_miibus_statchg(device_t);
  71. static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
  72. static void nfe_set_intr(struct nfe_softc *);
  73. static __inline void nfe_enable_intr(struct nfe_softc *);
  74. static __inline void nfe_disable_intr(struct nfe_softc *);
  75. static int nfe_ioctl(struct ifnet *, u_long, caddr_t);
  76. static void nfe_alloc_msix(struct nfe_softc *, int);
  77. static int nfe_intr(void *);
  78. static void nfe_int_task(void *, int);
  79. static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
  80. static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
  81. static int nfe_newbuf(struct nfe_softc *, int);
  82. static int nfe_jnewbuf(struct nfe_softc *, int);
  83. static int nfe_rxeof(struct nfe_softc *, int, int *);
  84. static int nfe_jrxeof(struct nfe_softc *, int, int *);
  85. static void nfe_txeof(struct nfe_softc *);
  86. static int nfe_encap(struct nfe_softc *, struct mbuf **);
  87. static void nfe_setmulti(struct nfe_softc *);
  88. static void nfe_start(struct ifnet *);
  89. static void nfe_start_locked(struct ifnet *);
  90. static void nfe_watchdog(struct ifnet *);
  91. static void nfe_init(void *);
  92. static void nfe_init_locked(void *);
  93. static void nfe_stop(struct ifnet *);
  94. static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
  95. static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
  96. static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
  97. static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
  98. static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
  99. static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
  100. static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
  101. static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
  102. static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
  103. static int nfe_ifmedia_upd(struct ifnet *);
  104. static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  105. static void nfe_tick(void *);
  106. static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
  107. static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
  108. static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
  109. static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
  110. static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
  111. static void nfe_sysctl_node(struct nfe_softc *);
  112. static void nfe_stats_clear(struct nfe_softc *);
  113. static void nfe_stats_update(struct nfe_softc *);
  114. static void nfe_set_linkspeed(struct nfe_softc *);
  115. static void nfe_set_wol(struct nfe_softc *);
  116. #ifdef NFE_DEBUG
  117. static int nfedebug = 0;
  118. #define DPRINTF(sc, ...) do { \
  119. if (nfedebug) \
  120. device_printf((sc)->nfe_dev, __VA_ARGS__); \
  121. } while (0)
  122. #define DPRINTFN(sc, n, ...) do { \
  123. if (nfedebug >= (n)) \
  124. device_printf((sc)->nfe_dev, __VA_ARGS__); \
  125. } while (0)
  126. #else
  127. #define DPRINTF(sc, ...)
  128. #define DPRINTFN(sc, n, ...)
  129. #endif
  130. #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx)
  131. #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx)
  132. #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
  133. /* Tunables. */
  134. static int msi_disable = 0;
  135. static int msix_disable = 0;
  136. static int jumbo_disable = 0;
  137. TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
  138. TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
  139. TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
  140. static device_method_t nfe_methods[] = {
  141. /* Device interface */
  142. DEVMETHOD(device_probe, nfe_probe),
  143. DEVMETHOD(device_attach, nfe_attach),
  144. DEVMETHOD(device_detach, nfe_detach),
  145. DEVMETHOD(device_suspend, nfe_suspend),
  146. DEVMETHOD(device_resume, nfe_resume),
  147. DEVMETHOD(device_shutdown, nfe_shutdown),
  148. /* MII interface */
  149. DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
  150. DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
  151. DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
  152. DEVMETHOD_END
  153. };
  154. static driver_t nfe_driver = {
  155. "nfe",
  156. nfe_methods,
  157. sizeof(struct nfe_softc)
  158. };
  159. static devclass_t nfe_devclass;
  160. DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
  161. DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
  162. static struct nfe_type nfe_devs[] = {
  163. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
  164. "NVIDIA nForce MCP Networking Adapter"},
  165. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
  166. "NVIDIA nForce2 MCP2 Networking Adapter"},
  167. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
  168. "NVIDIA nForce2 400 MCP4 Networking Adapter"},
  169. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
  170. "NVIDIA nForce2 400 MCP5 Networking Adapter"},
  171. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
  172. "NVIDIA nForce3 MCP3 Networking Adapter"},
  173. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
  174. "NVIDIA nForce3 250 MCP6 Networking Adapter"},
  175. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
  176. "NVIDIA nForce3 MCP7 Networking Adapter"},
  177. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
  178. "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
  179. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
  180. "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
  181. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
  182. "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */
  183. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
  184. "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */
  185. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
  186. "NVIDIA nForce 430 MCP12 Networking Adapter"},
  187. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
  188. "NVIDIA nForce 430 MCP13 Networking Adapter"},
  189. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
  190. "NVIDIA nForce MCP55 Networking Adapter"},
  191. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
  192. "NVIDIA nForce MCP55 Networking Adapter"},
  193. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
  194. "NVIDIA nForce MCP61 Networking Adapter"},
  195. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
  196. "NVIDIA nForce MCP61 Networking Adapter"},
  197. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
  198. "NVIDIA nForce MCP61 Networking Adapter"},
  199. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
  200. "NVIDIA nForce MCP61 Networking Adapter"},
  201. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
  202. "NVIDIA nForce MCP65 Networking Adapter"},
  203. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
  204. "NVIDIA nForce MCP65 Networking Adapter"},
  205. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
  206. "NVIDIA nForce MCP65 Networking Adapter"},
  207. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
  208. "NVIDIA nForce MCP65 Networking Adapter"},
  209. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
  210. "NVIDIA nForce MCP67 Networking Adapter"},
  211. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
  212. "NVIDIA nForce MCP67 Networking Adapter"},
  213. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
  214. "NVIDIA nForce MCP67 Networking Adapter"},
  215. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
  216. "NVIDIA nForce MCP67 Networking Adapter"},
  217. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
  218. "NVIDIA nForce MCP73 Networking Adapter"},
  219. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
  220. "NVIDIA nForce MCP73 Networking Adapter"},
  221. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
  222. "NVIDIA nForce MCP73 Networking Adapter"},
  223. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
  224. "NVIDIA nForce MCP73 Networking Adapter"},
  225. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
  226. "NVIDIA nForce MCP77 Networking Adapter"},
  227. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
  228. "NVIDIA nForce MCP77 Networking Adapter"},
  229. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
  230. "NVIDIA nForce MCP77 Networking Adapter"},
  231. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
  232. "NVIDIA nForce MCP77 Networking Adapter"},
  233. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
  234. "NVIDIA nForce MCP79 Networking Adapter"},
  235. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
  236. "NVIDIA nForce MCP79 Networking Adapter"},
  237. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
  238. "NVIDIA nForce MCP79 Networking Adapter"},
  239. {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
  240. "NVIDIA nForce MCP79 Networking Adapter"},
  241. {0, 0, NULL}
  242. };
  243. /* Probe for supported hardware IDs */
  244. static int
  245. nfe_probe(device_t dev)
  246. {
  247. struct nfe_type *t;
  248. t = nfe_devs;
  249. /* Check for matching PCI device IDs */
  250. while (t->name != NULL) {
  251. if ((pci_get_vendor(dev) == t->vid_id) &&
  252. (pci_get_device(dev) == t->dev_id)) {
  253. device_set_desc(dev, t->name);
  254. return (BUS_PROBE_DEFAULT);
  255. }
  256. t++;
  257. }
  258. return (ENXIO);
  259. }
  260. static void
  261. nfe_alloc_msix(struct nfe_softc *sc, int count)
  262. {
  263. int rid;
  264. rid = PCIR_BAR(2);
  265. sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
  266. &rid, RF_ACTIVE);
  267. if (sc->nfe_msix_res == NULL) {
  268. device_printf(sc->nfe_dev,
  269. "couldn't allocate MSIX table resource\n");
  270. return;
  271. }
  272. rid = PCIR_BAR(3);
  273. sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
  274. SYS_RES_MEMORY, &rid, RF_ACTIVE);
  275. if (sc->nfe_msix_pba_res == NULL) {
  276. device_printf(sc->nfe_dev,
  277. "couldn't allocate MSIX PBA resource\n");
  278. bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
  279. sc->nfe_msix_res);
  280. sc->nfe_msix_res = NULL;
  281. return;
  282. }
  283. if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
  284. if (count == NFE_MSI_MESSAGES) {
  285. if (bootverbose)
  286. device_printf(sc->nfe_dev,
  287. "Using %d MSIX messages\n", count);
  288. sc->nfe_msix = 1;
  289. } else {
  290. if (bootverbose)
  291. device_printf(sc->nfe_dev,
  292. "couldn't allocate MSIX\n");
  293. pci_release_msi(sc->nfe_dev);
  294. bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
  295. PCIR_BAR(3), sc->nfe_msix_pba_res);
  296. bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
  297. PCIR_BAR(2), sc->nfe_msix_res);
  298. sc->nfe_msix_pba_res = NULL;
  299. sc->nfe_msix_res = NULL;
  300. }
  301. }
  302. }
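/*
 * Attach: map the BAR(0) registers, tune the PCIe maximum read request
 * size, set up MSI-X/MSI/INTx interrupt resources, derive per-chipset
 * feature flags from the PCI device ID, power up the MAC, read the
 * station address, create the parent DMA tag and the Tx/Rx/jumbo-Rx
 * rings, configure ifnet capabilities, attach the PHY via miibus and
 * finally hook up the interrupt handler and its taskqueue.
 */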
  303. static int
  304. nfe_attach(device_t dev)
  305. {
  306. struct nfe_softc *sc;
  307. struct ifnet *ifp;
  308. bus_addr_t dma_addr_max;
  309. int error = 0, i, msic, reg, rid;
  310. sc = device_get_softc(dev);
  311. sc->nfe_dev = dev;
  312. mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  313. MTX_DEF);
  314. callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
  315. pci_enable_busmaster(dev);
  316. rid = PCIR_BAR(0);
  317. sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
  318. RF_ACTIVE);
  319. if (sc->nfe_res[0] == NULL) {
  320. device_printf(dev, "couldn't map memory resources\n");
  321. mtx_destroy(&sc->nfe_mtx);
  322. return (ENXIO);
  323. }
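/*
 * With a PCI Express capability present, 'reg' points at its base:
 * reg + 0x08 is the Device Control register (bits 14:12 select the
 * maximum read request size, 5 == 4096 bytes), reg + 0x0c is the Link
 * Capabilities register and reg + 0x12 is the Link Status register,
 * both of which carry the link width in bits 9:4.
 */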
  324. if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
  325. uint16_t v, width;
  326. v = pci_read_config(dev, reg + 0x08, 2);
  327. /* Change max. read request size to 4096. */
  328. v &= ~(7 << 12);
  329. v |= (5 << 12);
  330. pci_write_config(dev, reg + 0x08, v, 2);
  331. v = pci_read_config(dev, reg + 0x0c, 2);
  332. /* link capability */
  333. v = (v >> 4) & 0x0f;
  334. width = pci_read_config(dev, reg + 0x12, 2);
  335. /* negotiated link width */
  336. width = (width >> 4) & 0x3f;
  337. if (v != width)
  338. device_printf(sc->nfe_dev,
  339. "warning, negotiated width of link(x%d) != "
  340. "max. width of link(x%d)\n", width, v);
  341. }
  342. if (nfe_can_use_msix(sc) == 0) {
  343. device_printf(sc->nfe_dev,
  344. "MSI/MSI-X capability black-listed, will use INTx\n");
  345. msix_disable = 1;
  346. msi_disable = 1;
  347. }
  348. /* Allocate interrupt */
  349. if (msix_disable == 0 || msi_disable == 0) {
  350. if (msix_disable == 0 &&
  351. (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
  352. nfe_alloc_msix(sc, msic);
  353. if (msi_disable == 0 && sc->nfe_msix == 0 &&
  354. (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
  355. pci_alloc_msi(dev, &msic) == 0) {
  356. if (msic == NFE_MSI_MESSAGES) {
  357. if (bootverbose)
  358. device_printf(dev,
  359. "Using %d MSI messages\n", msic);
  360. sc->nfe_msi = 1;
  361. } else
  362. pci_release_msi(dev);
  363. }
  364. }
  365. if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
  366. rid = 0;
  367. sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
  368. RF_SHAREABLE | RF_ACTIVE);
  369. if (sc->nfe_irq[0] == NULL) {
  370. device_printf(dev, "couldn't allocate IRQ resources\n");
  371. error = ENXIO;
  372. goto fail;
  373. }
  374. } else {
  375. for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
  376. sc->nfe_irq[i] = bus_alloc_resource_any(dev,
  377. SYS_RES_IRQ, &rid, RF_ACTIVE);
  378. if (sc->nfe_irq[i] == NULL) {
  379. device_printf(dev,
  380. "couldn't allocate IRQ resources for "
  381. "message %d\n", rid);
  382. error = ENXIO;
  383. goto fail;
  384. }
  385. }
  386. /* Map interrupts to vector 0. */
  387. if (sc->nfe_msix != 0) {
  388. NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
  389. NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
  390. } else if (sc->nfe_msi != 0) {
  391. NFE_WRITE(sc, NFE_MSI_MAP0, 0);
  392. NFE_WRITE(sc, NFE_MSI_MAP1, 0);
  393. }
  394. }
  395. /* Set IRQ status/mask register. */
  396. sc->nfe_irq_status = NFE_IRQ_STATUS;
  397. sc->nfe_irq_mask = NFE_IRQ_MASK;
  398. sc->nfe_intrs = NFE_IRQ_WANTED;
  399. sc->nfe_nointrs = 0;
  400. if (sc->nfe_msix != 0) {
  401. sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
  402. sc->nfe_nointrs = NFE_IRQ_WANTED;
  403. } else if (sc->nfe_msi != 0) {
  404. sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
  405. sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
  406. }
  407. sc->nfe_devid = pci_get_device(dev);
  408. sc->nfe_revid = pci_get_revid(dev);
  409. sc->nfe_flags = 0;
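/*
 * Derive chipset feature flags from the PCI device ID: jumbo frame
 * support, 40-bit DMA addressing, hardware checksum offload, VLAN
 * tag insertion/stripping, power management, Tx flow control,
 * byte-reversed station address and the MIB counter revision.
 */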
  410. switch (sc->nfe_devid) {
  411. case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
  412. case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
  413. case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
  414. case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
  415. sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
  416. break;
  417. case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
  418. case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
  419. sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
  420. break;
  421. case PCI_PRODUCT_NVIDIA_CK804_LAN1:
  422. case PCI_PRODUCT_NVIDIA_CK804_LAN2:
  423. case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
  424. case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
  425. sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
  426. NFE_MIB_V1;
  427. break;
  428. case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
  429. case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
  430. sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
  431. NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
  432. break;
  433. case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
  434. case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
  435. case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
  436. case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
  437. case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
  438. case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
  439. case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
  440. case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
  441. case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
  442. case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
  443. case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
  444. case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
  445. sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
  446. NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
  447. break;
  448. case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
  449. case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
  450. case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
  451. case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
  452. /* XXX flow control */
  453. sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
  454. NFE_CORRECT_MACADDR | NFE_MIB_V3;
  455. break;
  456. case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
  457. case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
  458. case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
  459. case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
  460. /* XXX flow control */
  461. sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
  462. NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
  463. break;
  464. case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
  465. case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
  466. case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
  467. case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
  468. sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
  469. NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
  470. NFE_MIB_V2;
  471. break;
  472. }
  473. nfe_power(sc);
  474. /* Check for reversed ethernet address */
  475. if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
  476. sc->nfe_flags |= NFE_CORRECT_MACADDR;
  477. nfe_get_macaddr(sc, sc->eaddr);
  478. /*
  479. * Allocate the parent bus DMA tag appropriate for PCI.
  480. */
  481. dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
  482. if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
  483. dma_addr_max = NFE_DMA_MAXADDR;
  484. error = bus_dma_tag_create(
  485. bus_get_dma_tag(sc->nfe_dev), /* parent */
  486. 1, 0, /* alignment, boundary */
  487. dma_addr_max, /* lowaddr */
  488. BUS_SPACE_MAXADDR, /* highaddr */
  489. NULL, NULL, /* filter, filterarg */
  490. BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
  491. BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
  492. 0, /* flags */
  493. NULL, NULL, /* lockfunc, lockarg */
  494. &sc->nfe_parent_tag);
  495. if (error)
  496. goto fail;
  497. ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
  498. if (ifp == NULL) {
  499. device_printf(dev, "can not if_alloc()\n");
  500. error = ENOSPC;
  501. goto fail;
  502. }
  503. /*
  504. * Allocate Tx and Rx rings.
  505. */
  506. if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
  507. goto fail;
  508. if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
  509. goto fail;
  510. nfe_alloc_jrx_ring(sc, &sc->jrxq);
  511. /* Create sysctl node. */
  512. nfe_sysctl_node(sc);
  513. ifp->if_softc = sc;
  514. if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  515. ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  516. ifp->if_ioctl = nfe_ioctl;
  517. ifp->if_start = nfe_start;
  518. ifp->if_hwassist = 0;
  519. ifp->if_capabilities = 0;
  520. ifp->if_init = nfe_init;
  521. IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
  522. ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
  523. IFQ_SET_READY(&ifp->if_snd);
  524. if (sc->nfe_flags & NFE_HW_CSUM) {
  525. ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
  526. ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
  527. }
  528. ifp->if_capenable = ifp->if_capabilities;
  529. sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
  530. /* VLAN capability setup. */
  531. ifp->if_capabilities |= IFCAP_VLAN_MTU;
  532. if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
  533. ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
  534. if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
  535. ifp->if_capabilities |= IFCAP_VLAN_HWCSUM |
  536. IFCAP_VLAN_HWTSO;
  537. }
  538. if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
  539. ifp->if_capabilities |= IFCAP_WOL_MAGIC;
  540. ifp->if_capenable = ifp->if_capabilities;
  541. /*
  542. * Tell the upper layer(s) we support long frames.
  543. * Must appear after the call to ether_ifattach() because
  544. * ether_ifattach() sets ifi_hdrlen to the default value.
  545. */
  546. ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  547. #ifdef DEVICE_POLLING
  548. ifp->if_capabilities |= IFCAP_POLLING;
  549. #endif
  550. /* Do MII setup */
  551. error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
  552. nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
  553. MIIF_DOPAUSE);
  554. if (error != 0) {
  555. device_printf(dev, "attaching PHYs failed\n");
  556. goto fail;
  557. }
  558. ether_ifattach(ifp, sc->eaddr);
  559. TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
  560. sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
  561. taskqueue_thread_enqueue, &sc->nfe_tq);
  562. taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
  563. device_get_nameunit(sc->nfe_dev));
  564. error = 0;
  565. if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
  566. error = bus_setup_intr(dev, sc->nfe_irq[0],
  567. INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
  568. &sc->nfe_intrhand[0]);
  569. } else {
  570. for (i = 0; i < NFE_MSI_MESSAGES; i++) {
  571. error = bus_setup_intr(dev, sc->nfe_irq[i],
  572. INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
  573. &sc->nfe_intrhand[i]);
  574. if (error != 0)
  575. break;
  576. }
  577. }
  578. if (error) {
  579. device_printf(dev, "couldn't set up irq\n");
  580. taskqueue_free(sc->nfe_tq);
  581. sc->nfe_tq = NULL;
  582. ether_ifdetach(ifp);
  583. goto fail;
  584. }
  585. fail:
  586. if (error)
  587. nfe_detach(dev);
  588. return (error);
  589. }
  590. static int
  591. nfe_detach(device_t dev)
  592. {
  593. struct nfe_softc *sc;
  594. struct ifnet *ifp;
  595. uint8_t eaddr[ETHER_ADDR_LEN];
  596. int i, rid;
  597. sc = device_get_softc(dev);
  598. KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
  599. ifp = sc->nfe_ifp;
  600. #ifdef DEVICE_POLLING
  601. if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
  602. ether_poll_deregister(ifp);
  603. #endif
  604. if (device_is_attached(dev)) {
  605. NFE_LOCK(sc);
  606. nfe_stop(ifp);
  607. ifp->if_flags &= ~IFF_UP;
  608. NFE_UNLOCK(sc);
  609. callout_drain(&sc->nfe_stat_ch);
  610. ether_ifdetach(ifp);
  611. }
  612. if (ifp) {
  613. /* restore ethernet address */
  614. if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
  615. for (i = 0; i < ETHER_ADDR_LEN; i++) {
  616. eaddr[i] = sc->eaddr[5 - i];
  617. }
  618. } else
  619. bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
  620. nfe_set_macaddr(sc, eaddr);
  621. if_free(ifp);
  622. }
  623. if (sc->nfe_miibus)
  624. device_delete_child(dev, sc->nfe_miibus);
  625. bus_generic_detach(dev);
  626. if (sc->nfe_tq != NULL) {
  627. taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
  628. taskqueue_free(sc->nfe_tq);
  629. sc->nfe_tq = NULL;
  630. }
  631. for (i = 0; i < NFE_MSI_MESSAGES; i++) {
  632. if (sc->nfe_intrhand[i] != NULL) {
  633. bus_teardown_intr(dev, sc->nfe_irq[i],
  634. sc->nfe_intrhand[i]);
  635. sc->nfe_intrhand[i] = NULL;
  636. }
  637. }
  638. if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
  639. if (sc->nfe_irq[0] != NULL)
  640. bus_release_resource(dev, SYS_RES_IRQ, 0,
  641. sc->nfe_irq[0]);
  642. } else {
  643. for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
  644. if (sc->nfe_irq[i] != NULL) {
  645. bus_release_resource(dev, SYS_RES_IRQ, rid,
  646. sc->nfe_irq[i]);
  647. sc->nfe_irq[i] = NULL;
  648. }
  649. }
  650. pci_release_msi(dev);
  651. }
  652. if (sc->nfe_msix_pba_res != NULL) {
  653. bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
  654. sc->nfe_msix_pba_res);
  655. sc->nfe_msix_pba_res = NULL;
  656. }
  657. if (sc->nfe_msix_res != NULL) {
  658. bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
  659. sc->nfe_msix_res);
  660. sc->nfe_msix_res = NULL;
  661. }
  662. if (sc->nfe_res[0] != NULL) {
  663. bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
  664. sc->nfe_res[0]);
  665. sc->nfe_res[0] = NULL;
  666. }
  667. nfe_free_tx_ring(sc, &sc->txq);
  668. nfe_free_rx_ring(sc, &sc->rxq);
  669. nfe_free_jrx_ring(sc, &sc->jrxq);
  670. if (sc->nfe_parent_tag) {
  671. bus_dma_tag_destroy(sc->nfe_parent_tag);
  672. sc->nfe_parent_tag = NULL;
  673. }
  674. mtx_destroy(&sc->nfe_mtx);
  675. return (0);
  676. }
  677. static int
  678. nfe_suspend(device_t dev)
  679. {
  680. struct nfe_softc *sc;
  681. sc = device_get_softc(dev);
  682. NFE_LOCK(sc);
  683. nfe_stop(sc->nfe_ifp);
  684. nfe_set_wol(sc);
  685. sc->nfe_suspended = 1;
  686. NFE_UNLOCK(sc);
  687. return (0);
  688. }
  689. static int
  690. nfe_resume(device_t dev)
  691. {
  692. struct nfe_softc *sc;
  693. struct ifnet *ifp;
  694. sc = device_get_softc(dev);
  695. NFE_LOCK(sc);
  696. nfe_power(sc);
  697. ifp = sc->nfe_ifp;
  698. if (ifp->if_flags & IFF_UP)
  699. nfe_init_locked(sc);
  700. sc->nfe_suspended = 0;
  701. NFE_UNLOCK(sc);
  702. return (0);
  703. }
  704. static int
  705. nfe_can_use_msix(struct nfe_softc *sc)
  706. {
  707. static struct msix_blacklist {
  708. char *maker;
  709. char *product;
  710. } msix_blacklists[] = {
  711. { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
  712. };
  713. struct msix_blacklist *mblp;
  714. char *maker, *product;
  715. int count, n, use_msix;
  716. /*
  717. * Search the baseboard manufacturer and product name table
  718. * to see whether this system has a known MSI/MSI-X issue.
  719. */
  720. maker = getenv("smbios.planar.maker");
  721. product = getenv("smbios.planar.product");
  722. use_msix = 1;
  723. if (maker != NULL && product != NULL) {
  724. count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]);
  725. mblp = msix_blacklists;
  726. for (n = 0; n < count; n++) {
  727. if (strcmp(maker, mblp->maker) == 0 &&
  728. strcmp(product, mblp->product) == 0) {
  729. use_msix = 0;
  730. break;
  731. }
  732. mblp++;
  733. }
  734. }
  735. if (maker != NULL)
  736. freeenv(maker);
  737. if (product != NULL)
  738. freeenv(product);
  739. return (use_msix);
  740. }
  741. /* Take PHY/NIC out of powerdown, from Linux */
  742. static void
  743. nfe_power(struct nfe_softc *sc)
  744. {
  745. uint32_t pwr;
  746. if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
  747. return;
  748. NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
  749. NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
  750. DELAY(100);
  751. NFE_WRITE(sc, NFE_MAC_RESET, 0);
  752. DELAY(100);
  753. NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
  754. pwr = NFE_READ(sc, NFE_PWR2_CTL);
  755. pwr &= ~NFE_PWR2_WAKEUP_MASK;
  756. if (sc->nfe_revid >= 0xa3 &&
  757. (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
  758. sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
  759. pwr |= NFE_PWR2_REVA3;
  760. NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
  761. }
  762. static void
  763. nfe_miibus_statchg(device_t dev)
  764. {
  765. struct nfe_softc *sc;
  766. struct mii_data *mii;
  767. struct ifnet *ifp;
  768. uint32_t rxctl, txctl;
  769. sc = device_get_softc(dev);
  770. mii = device_get_softc(sc->nfe_miibus);
  771. ifp = sc->nfe_ifp;
  772. sc->nfe_link = 0;
  773. if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
  774. (IFM_ACTIVE | IFM_AVALID)) {
  775. switch (IFM_SUBTYPE(mii->mii_media_active)) {
  776. case IFM_10_T:
  777. case IFM_100_TX:
  778. case IFM_1000_T:
  779. sc->nfe_link = 1;
  780. break;
  781. default:
  782. break;
  783. }
  784. }
  785. nfe_mac_config(sc, mii);
  786. txctl = NFE_READ(sc, NFE_TX_CTL);
  787. rxctl = NFE_READ(sc, NFE_RX_CTL);
  788. if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
  789. txctl |= NFE_TX_START;
  790. rxctl |= NFE_RX_START;
  791. } else {
  792. txctl &= ~NFE_TX_START;
  793. rxctl &= ~NFE_RX_START;
  794. }
  795. NFE_WRITE(sc, NFE_TX_CTL, txctl);
  796. NFE_WRITE(sc, NFE_RX_CTL, rxctl);
  797. }
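/*
 * Program the MAC to match the negotiated media: PHY interface mode,
 * random seed, MISC1 and link speed registers, plus Rx/Tx pause frame
 * handling when flow control has been negotiated.
 */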
  798. static void
  799. nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
  800. {
  801. uint32_t link, misc, phy, seed;
  802. uint32_t val;
  803. NFE_LOCK_ASSERT(sc);
  804. phy = NFE_READ(sc, NFE_PHY_IFACE);
  805. phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
  806. seed = NFE_READ(sc, NFE_RNDSEED);
  807. seed &= ~NFE_SEED_MASK;
  808. misc = NFE_MISC1_MAGIC;
  809. link = NFE_MEDIA_SET;
  810. if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
  811. phy |= NFE_PHY_HDX; /* half-duplex */
  812. misc |= NFE_MISC1_HDX;
  813. }
  814. switch (IFM_SUBTYPE(mii->mii_media_active)) {
  815. case IFM_1000_T: /* full-duplex only */
  816. link |= NFE_MEDIA_1000T;
  817. seed |= NFE_SEED_1000T;
  818. phy |= NFE_PHY_1000T;
  819. break;
  820. case IFM_100_TX:
  821. link |= NFE_MEDIA_100TX;
  822. seed |= NFE_SEED_100TX;
  823. phy |= NFE_PHY_100TX;
  824. break;
  825. case IFM_10_T:
  826. link |= NFE_MEDIA_10T;
  827. seed |= NFE_SEED_10T;
  828. break;
  829. }
  830. if ((phy & 0x10000000) != 0) {
  831. if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
  832. val = NFE_R1_MAGIC_1000;
  833. else
  834. val = NFE_R1_MAGIC_10_100;
  835. } else
  836. val = NFE_R1_MAGIC_DEFAULT;
  837. NFE_WRITE(sc, NFE_SETUP_R1, val);
  838. NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
  839. NFE_WRITE(sc, NFE_PHY_IFACE, phy);
  840. NFE_WRITE(sc, NFE_MISC1, misc);
  841. NFE_WRITE(sc, NFE_LINKSPEED, link);
  842. if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
  843. /* It seems all hardware supports Rx pause frames. */
  844. val = NFE_READ(sc, NFE_RXFILTER);
  845. if ((IFM_OPTIONS(mii->mii_media_active) &
  846. IFM_ETH_RXPAUSE) != 0)
  847. val |= NFE_PFF_RX_PAUSE;
  848. else
  849. val &= ~NFE_PFF_RX_PAUSE;
  850. NFE_WRITE(sc, NFE_RXFILTER, val);
  851. if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
  852. val = NFE_READ(sc, NFE_MISC1);
  853. if ((IFM_OPTIONS(mii->mii_media_active) &
  854. IFM_ETH_TXPAUSE) != 0) {
  855. NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
  856. NFE_TX_PAUSE_FRAME_ENABLE);
  857. val |= NFE_MISC1_TX_PAUSE;
  858. } else {
  859. val &= ~NFE_MISC1_TX_PAUSE;
  860. NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
  861. NFE_TX_PAUSE_FRAME_DISABLE);
  862. }
  863. NFE_WRITE(sc, NFE_MISC1, val);
  864. }
  865. } else {
  866. /* disable rx/tx pause frames */
  867. val = NFE_READ(sc, NFE_RXFILTER);
  868. val &= ~NFE_PFF_RX_PAUSE;
  869. NFE_WRITE(sc, NFE_RXFILTER, val);
  870. if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
  871. NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
  872. NFE_TX_PAUSE_FRAME_DISABLE);
  873. val = NFE_READ(sc, NFE_MISC1);
  874. val &= ~NFE_MISC1_TX_PAUSE;
  875. NFE_WRITE(sc, NFE_MISC1, val);
  876. }
  877. }
  878. }
  879. static int
  880. nfe_miibus_readreg(device_t dev, int phy, int reg)
  881. {
  882. struct nfe_softc *sc = device_get_softc(dev);
  883. uint32_t val;
  884. int ntries;
  885. NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
  886. if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
  887. NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
  888. DELAY(100);
  889. }
  890. NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
  891. for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
  892. DELAY(100);
  893. if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
  894. break;
  895. }
  896. if (ntries == NFE_TIMEOUT) {
  897. DPRINTFN(sc, 2, "timeout waiting for PHY\n");
  898. return 0;
  899. }
  900. if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
  901. DPRINTFN(sc, 2, "could not read PHY\n");
  902. return 0;
  903. }
  904. val = NFE_READ(sc, NFE_PHY_DATA);
  905. if (val != 0xffffffff && val != 0)
  906. sc->mii_phyaddr = phy;
  907. DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
  908. return (val);
  909. }
  910. static int
  911. nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
  912. {
  913. struct nfe_softc *sc = device_get_softc(dev);
  914. uint32_t ctl;
  915. int ntries;
  916. NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
  917. if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
  918. NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
  919. DELAY(100);
  920. }
  921. NFE_WRITE(sc, NFE_PHY_DATA, val);
  922. ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
  923. NFE_WRITE(sc, NFE_PHY_CTL, ctl);
  924. for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
  925. DELAY(100);
  926. if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
  927. break;
  928. }
  929. #ifdef NFE_DEBUG
  930. if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
  931. device_printf(sc->nfe_dev, "could not write to PHY\n");
  932. #endif
  933. return (0);
  934. }
  935. struct nfe_dmamap_arg {
  936. bus_addr_t nfe_busaddr;
  937. };
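/*
 * nfe_dma_map_segs() stores the bus address of the single DMA segment
 * in nfe_busaddr; the ring allocation routines read it back after
 * bus_dmamap_load() to learn the physical address of each ring.
 */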
  938. static int
  939. nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
  940. {
  941. struct nfe_dmamap_arg ctx;
  942. struct nfe_rx_data *data;
  943. void *desc;
  944. int i, error, descsize;
  945. if (sc->nfe_flags & NFE_40BIT_ADDR) {
  946. desc = ring->desc64;
  947. descsize = sizeof (struct nfe_desc64);
  948. } else {
  949. desc = ring->desc32;
  950. descsize = sizeof (struct nfe_desc32);
  951. }
  952. ring->cur = ring->next = 0;
  953. error = bus_dma_tag_create(sc->nfe_parent_tag,
  954. NFE_RING_ALIGN, 0, /* alignment, boundary */
  955. BUS_SPACE_MAXADDR, /* lowaddr */
  956. BUS_SPACE_MAXADDR, /* highaddr */
  957. NULL, NULL, /* filter, filterarg */
  958. NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
  959. NFE_RX_RING_COUNT * descsize, /* maxsegsize */
  960. 0, /* flags */
  961. NULL, NULL, /* lockfunc, lockarg */
  962. &ring->rx_desc_tag);
  963. if (error != 0) {
  964. device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
  965. goto fail;
  966. }
  967. /* allocate memory for the descriptors */
  968. error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
  969. BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
  970. if (error != 0) {
  971. device_printf(sc->nfe_dev, "could not create desc DMA map\n");
  972. goto fail;
  973. }
  974. if (sc->nfe_flags & NFE_40BIT_ADDR)
  975. ring->desc64 = desc;
  976. else
  977. ring->desc32 = desc;
  978. /* map desc to device visible address space */
  979. ctx.nfe_busaddr = 0;
  980. error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
  981. NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
  982. if (error != 0) {
  983. device_printf(sc->nfe_dev, "could not load desc DMA map\n");
  984. goto fail;
  985. }
  986. ring->physaddr = ctx.nfe_busaddr;
  987. error = bus_dma_tag_create(sc->nfe_parent_tag,
  988. 1, 0, /* alignment, boundary */
  989. BUS_SPACE_MAXADDR, /* lowaddr */
  990. BUS_SPACE_MAXADDR, /* highaddr */
  991. NULL, NULL, /* filter, filterarg */
  992. MCLBYTES, 1, /* maxsize, nsegments */
  993. MCLBYTES, /* maxsegsize */
  994. 0, /* flags */
  995. NULL, NULL, /* lockfunc, lockarg */
  996. &ring->rx_data_tag);
  997. if (error != 0) {
  998. device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
  999. goto fail;
  1000. }
  1001. error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
  1002. if (error != 0) {
  1003. device_printf(sc->nfe_dev,
  1004. "could not create Rx DMA spare map\n");
  1005. goto fail;
  1006. }
  1007. /*
  1008. * Create DMA maps for the Rx buffers; the mbufs themselves are allocated later in nfe_init_rx_ring().
  1009. */
  1010. for (i = 0; i < NFE_RX_RING_COUNT; i++) {
  1011. data = &sc->rxq.data[i];
  1012. data->rx_data_map = NULL;
  1013. data->m = NULL;
  1014. error = bus_dmamap_create(ring->rx_data_tag, 0,
  1015. &data->rx_data_map);
  1016. if (error != 0) {
  1017. device_printf(sc->nfe_dev,
  1018. "could not create Rx DMA map\n");
  1019. goto fail;
  1020. }
  1021. }
  1022. fail:
  1023. return (error);
  1024. }
  1025. static void
  1026. nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
  1027. {
  1028. struct nfe_dmamap_arg ctx;
  1029. struct nfe_rx_data *data;
  1030. void *desc;
  1031. int i, error, descsize;
  1032. if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
  1033. return;
  1034. if (jumbo_disable != 0) {
  1035. device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
  1036. sc->nfe_jumbo_disable = 1;
  1037. return;
  1038. }
  1039. if (sc->nfe_flags & NFE_40BIT_ADDR) {
  1040. desc = ring->jdesc64;
  1041. descsize = sizeof (struct nfe_desc64);
  1042. } else {
  1043. desc = ring->jdesc32;
  1044. descsize = sizeof (struct nfe_desc32);
  1045. }
  1046. ring->jcur = ring->jnext = 0;
  1047. /* Create DMA tag for jumbo Rx ring. */
  1048. error = bus_dma_tag_create(sc->nfe_parent_tag,
  1049. NFE_RING_ALIGN, 0, /* alignment, boundary */
  1050. BUS_SPACE_MAXADDR, /* lowaddr */
  1051. BUS_SPACE_MAXADDR, /* highaddr */
  1052. NULL, NULL, /* filter, filterarg */
  1053. NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
  1054. 1, /* nsegments */
  1055. NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
  1056. 0, /* flags */
  1057. NULL, NULL, /* lockfunc, lockarg */
  1058. &ring->jrx_desc_tag);
  1059. if (error != 0) {
  1060. device_printf(sc->nfe_dev,
  1061. "could not create jumbo ring DMA tag\n");
  1062. goto fail;
  1063. }
  1064. /* Create DMA tag for jumbo Rx buffers. */
  1065. error = bus_dma_tag_create(sc->nfe_parent_tag,
  1066. 1, 0, /* alignment, boundary */
  1067. BUS_SPACE_MAXADDR, /* lowaddr */
  1068. BUS_SPACE_MAXADDR, /* highaddr */
  1069. NULL, NULL, /* filter, filterarg */
  1070. MJUM9BYTES, /* maxsize */
  1071. 1, /* nsegments */
  1072. MJUM9BYTES, /* maxsegsize */
  1073. 0, /* flags */
  1074. NULL, NULL, /* lockfunc, lockarg */
  1075. &ring->jrx_data_tag);
  1076. if (error != 0) {
  1077. device_printf(sc->nfe_dev,
  1078. "could not create jumbo Rx buffer DMA tag\n");
  1079. goto fail;
  1080. }
  1081. /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
  1082. error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
  1083. BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
  1084. if (error != 0) {
  1085. device_printf(sc->nfe_dev,
  1086. "could not allocate DMA'able memory for jumbo Rx ring\n");
  1087. goto fail;
  1088. }
  1089. if (sc->nfe_flags & NFE_40BIT_ADDR)
  1090. ring->jdesc64 = desc;
  1091. else
  1092. ring->jdesc32 = desc;
  1093. ctx.nfe_busaddr = 0;
  1094. error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
  1095. NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
  1096. if (error != 0) {
  1097. device_printf(sc->nfe_dev,
  1098. "could not load DMA'able memory for jumbo Rx ring\n");
  1099. goto fail;
  1100. }
  1101. ring->jphysaddr = ctx.nfe_busaddr;
  1102. /* Create DMA maps for jumbo Rx buffers. */
  1103. error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
  1104. if (error != 0) {
  1105. device_printf(sc->nfe_dev,
  1106. "could not create jumbo Rx DMA spare map\n");
  1107. goto fail;
  1108. }
  1109. for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
  1110. data = &sc->jrxq.jdata[i];
  1111. data->rx_data_map = NULL;
  1112. data->m = NULL;
  1113. error = bus_dmamap_create(ring->jrx_data_tag, 0,
  1114. &data->rx_data_map);
  1115. if (error != 0) {
  1116. device_printf(sc->nfe_dev,
  1117. "could not create jumbo Rx DMA map\n");
  1118. goto fail;
  1119. }
  1120. }
  1121. return;
  1122. fail:
  1123. /*
  1124. * Running without jumbo frame support is OK for most cases,
  1125. * so don't fail if the jumbo DMA tag/map could not be created.
  1126. */
  1127. nfe_free_jrx_ring(sc, ring);
  1128. device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
  1129. "resource shortage\n");
  1130. sc->nfe_jumbo_disable = 1;
  1131. }
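/*
 * The ring init routines below zero the descriptors, load one receive
 * buffer per slot (a standard cluster for the normal ring, a 9k jumbo
 * buffer for the jumbo ring) via nfe_newbuf()/nfe_jnewbuf(), and then
 * sync the descriptor DMA maps.
 */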
  1132. static int
  1133. nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
  1134. {
  1135. void *desc;
  1136. size_t descsize;
  1137. int i;
  1138. ring->cur = ring->next = 0;
  1139. if (sc->nfe_flags & NFE_40BIT_ADDR) {
  1140. desc = ring->desc64;
  1141. descsize = sizeof (struct nfe_desc64);
  1142. } else {
  1143. desc = ring->desc32;
  1144. descsize = sizeof (struct nfe_desc32);
  1145. }
  1146. bzero(desc, descsize * NFE_RX_RING_COUNT);
  1147. for (i = 0; i < NFE_RX_RING_COUNT; i++) {
  1148. if (nfe_newbuf(sc, i) != 0)
  1149. return (ENOBUFS);
  1150. }
  1151. bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
  1152. BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  1153. return (0);
  1154. }
  1155. static int
  1156. nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
  1157. {
  1158. void *desc;
  1159. size_t descsize;
  1160. int i;
  1161. ring->jcur = ring->jnext = 0;
  1162. if (sc->nfe_flags & NFE_40BIT_ADDR) {
  1163. desc = ring->jdesc64;
  1164. descsize = sizeof (struct nfe_desc64);
  1165. } else {
  1166. desc = ring->jdesc32;
  1167. descsize = sizeof (struct nfe_desc32);
  1168. }
  1169. bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
  1170. for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
  1171. if (nfe_jnewbuf(sc, i) != 0)
  1172. return (ENOBUFS);
  1173. }
  1174. bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
  1175. BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  1176. return (0);
  1177. }
  1178. static void
  1179. nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
  1180. {
  1181. struct nfe_rx_data *data;
  1182. void *desc;
  1183. int i, descsize;
  1184. if (sc->nfe_flags & NFE_40BIT_ADDR) {
  1185. desc = ring->desc64;
  1186. descsize = sizeof (struct nfe_desc64);
  1187. } else {
  1188. desc = ring->desc32;
  1189. descsize = sizeof (struct nfe_desc32);
  1190. }
  1191. for (i = 0; i < NFE_RX_RING_COUNT; i++) {
  1192. data = &ring->data[i];
  1193. if (data->rx_data_map != NULL) {
  1194. bus_dmamap_destroy(ring->rx_data_tag,
  1195. data->rx_data_map);
  1196. data->rx_data_map = NULL;
  1197. }
  1198. if (data->m != NULL) {
  1199. m_freem(data->m);
  1200. data->m = NULL;
  1201. }
  1202. }
  1203. if (ring->rx_data_tag != NULL) {
  1204. if (ring->rx_spare_map != NULL) {
  1205. bus_dmamap_destroy(ring->rx_data_tag,
  1206. ring->rx_spare_map);
  1207. ring->rx_spare_map = NULL;
  1208. }
  1209. bus_dma_tag_destroy(ring->rx_data_tag);
  1210. ring->rx_data_tag = NULL;
  1211. }
  1212. if (desc != NULL) {
  1213. bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
  1214. bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
  1215. ring->desc64 = NULL;
  1216. ring->desc32 = NULL;
  1217. ring->rx_desc_map = NULL;
  1218. }
  1219. if (ring->rx_desc_tag != NULL) {
  1220. bus_dma_tag_destroy(ring->rx_desc_tag);
  1221. ring->rx_desc_tag = NULL;
  1222. }
  1223. }
  1224. static void
  1225. nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
  1226. {
  1227. struct nfe_rx_data *data;
  1228. void *desc;
  1229. int i, descsize;
  1230. if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
  1231. return;
  1232. if (sc->nfe_flags & NFE_40BIT_ADDR) {
  1233. desc = ring->jdesc64;
  1234. descsize = sizeof (struct nfe_desc64);
  1235. } else {
  1236. desc = ring->jdesc32;
  1237. descsize = sizeof (struct nfe_desc32);
  1238. }
  1239. for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
  1240. data = &ring->jdata[i];
  1241. if (data->rx_data_map != NULL) {
  1242. bus_dmamap_destroy(ring->jrx_data_tag,
  1243. data->rx_data_map);
  1244. data->rx_data_map = NULL;
  1245. }
  1246. if (data->m != NULL) {
  1247. m_freem(data->m);
  1248. data->m = NULL;
  1249. }
  1250. }
  1251. if (ring->jrx_data_tag != NULL) {
  1252. if (ring->jrx_spare_map != NULL) {
  1253. bus_dmamap_destroy(ring->jrx_data_tag,
  1254. ring->jrx_spare_map);
  1255. ring->jrx_spare_map = NULL;
  1256. }
  1257. bus_dma_tag_destroy(ring->jrx_data_tag);
  1258. ring->jrx_data_tag = NULL;
  1259. }
  1260. if (desc != NULL) {
  1261. bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
  1262. bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
  1263. ring->jdesc64 = NULL;
  1264. ring->jdesc32 = NULL;
  1265. ring->jrx_desc_map = NULL;
  1266. }
  1267. if (ring->jrx_desc_tag != NULL) {
  1268. bus_dma_tag_destroy(ring->jrx_desc_tag);
  1269. ring->jrx_desc_tag = NULL;
  1270. }
  1271. }
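/*
 * The Tx ring uses one descriptor area plus a per-slot DMA map sized
 * for TSO transmissions (up to NFE_MAX_SCATTER segments of at most
 * NFE_TSO_MAXSGSIZE bytes each, NFE_TSO_MAXSIZE bytes in total).
 */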
  1272. static int
  1273. nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
  1274. {
  1275. struct nfe_dmamap_arg ctx;
  1276. int i, error;
  1277. void *desc;
  1278. int descsize;
  1279. if (sc->nfe_flags & NFE_40BIT_ADDR) {
  1280. desc = ring->desc64;
  1281. descsize = sizeof (struct nfe_desc64);
  1282. } else {
  1283. desc = ring->desc32;
  1284. descsize = sizeof (struct nfe_desc32);
  1285. }
  1286. ring->queued = 0;
  1287. ring->cur = ring->next = 0;
  1288. error = bus_dma_tag_create(sc->nfe_parent_tag,
  1289. NFE_RING_ALIGN, 0, /* alignment, boundary */
  1290. BUS_SPACE_MAXADDR, /* lowaddr */
  1291. BUS_SPACE_MAXADDR, /* highaddr */
  1292. NULL, NULL, /* filter, filterarg */
  1293. NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
  1294. NFE_TX_RING_COUNT * descsize, /* maxsegsize */
  1295. 0, /* flags */
  1296. NULL, NULL, /* lockfunc, lockarg */
  1297. &ring->tx_desc_tag);
  1298. if (error != 0) {
  1299. device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
  1300. goto fail;
  1301. }
  1302. error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
  1303. BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
  1304. if (error != 0) {
  1305. device_printf(sc->nfe_dev, "could not create desc DMA map\n");
  1306. goto fail;
  1307. }
  1308. if (sc->nfe_flags & NFE_40BIT_ADDR)
  1309. ring->desc64 = desc;
  1310. else
  1311. ring->desc32 = desc;
  1312. ctx.nfe_busaddr = 0;
  1313. error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
  1314. NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
  1315. if (error != 0) {
  1316. device_printf(sc->nfe_dev, "could not load desc DMA map\n");
  1317. goto fail;
  1318. }
  1319. ring->physaddr = ctx.nfe_busaddr;
  1320. error = bus_dma_tag_create(sc->nfe_parent_tag,
  1321. 1, 0,
  1322. BUS_SPACE_MAXADDR,
  1323. BUS_SPACE_MAXADDR,
  1324. NULL, NULL,
  1325. NFE_TSO_MAXSIZE,
  1326. NFE_MAX_SCATTER,
  1327. NFE_TSO_MAXSGSIZE,
  1328. 0,
  1329. NULL, NULL,
  1330. &ring->tx_data_tag);
  1331. if (error != 0) {
  1332. device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
  1333. goto fail;
  1334. }
  1335. for (i = 0; i < NFE_TX_RING_COUNT; i++) {
  1336. error = bus_dmamap_create(ring->tx_data_tag, 0,
  1337. &ring->data[i].tx_data_map);
  1338. if (error != 0) {
  1339. device_printf(sc->nfe_dev,
  1340. "could not create Tx DMA map\n");
  1341. goto fail;
  1342. }
  1343. }
  1344. fail:
  1345. return (error);
  1346. }
  1347. static void
  1348. nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
  1349. {
  1350. void *desc;
  1351. size_t descsize;
  1352. sc->nfe_force_tx = 0;
  1353. ring->queued = 0;
  1354. ring->cur = ring->next = 0;
  1355. if (sc->nfe_flags & NFE_40BIT_ADDR) {
  1356. desc = ring->desc64;
  1357. descsize = sizeof (struct nfe_desc64);
  1358. } else {
  1359. desc = ring->desc32;
  1360. descsize = sizeof (struct nfe_desc32);
  1361. }
  1362. bzero(desc, descsize * NFE_TX_RING_COUNT);
  1363. bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
  1364. BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  1365. }
  1366. static void
  1367. nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
  1368. {
  1369. struct nfe_tx_data *data;
  1370. void *desc;
  1371. int i, descsize;
  1372. if (sc->nfe_flags & NFE_40BIT_ADDR) {
  1373. desc = ring->desc64;
  1374. descsize = sizeof (struct nfe_desc64);
  1375. } else {
  1376. desc = ring->desc32;
  1377. descsize = sizeof (struct nfe_desc32);
  1378. }
  1379. for (i = 0; i < NFE_TX_RING_COUNT; i++) {
  1380. data = &ring->data[i];
  1381. if (data->m != NULL) {
  1382. bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
  1383. BUS_DMASYNC_POSTWRITE);
  1384. bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
  1385. m_freem(data->m);
  1386. data->m = NULL;
  1387. }
  1388. if (data->tx_data_map != NULL) {
  1389. bus_dmamap_destroy(ring->tx_data_tag,
  1390. data->tx_data_map);
  1391. data->tx_data_map = NULL;
  1392. }
  1393. }
  1394. if (ring->tx_data_tag != NULL) {
  1395. bus_dma_tag_destroy(ring->tx_data_tag);
  1396. ring->tx_data_tag = NULL;
  1397. }
  1398. if (desc != NULL) {
  1399. bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
  1400. BUS_DMASYNC_POSTWRITE);
  1401. bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
  1402. bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
  1403. ring->desc64 = NULL;
  1404. ring->desc32 = NULL;
  1405. ring->tx_desc_map = NULL;
  1406. bus_dma_tag_destroy(ring->tx_desc_tag);
  1407. ring->tx_desc_tag = NULL;
  1408. }
  1409. }
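/*
 * nfe_poll() is the DEVICE_POLLING handler registered from nfe_ioctl()
 * when IFCAP_POLLING is enabled; it drains the Rx and Tx rings with
 * interrupts disabled and checks link state on POLL_AND_CHECK_STATUS.
 */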
  1410. #ifdef DEVICE_POLLING
  1411. static poll_handler_t nfe_poll;
  1412. static int
  1413. nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
  1414. {
  1415. struct nfe_softc *sc = ifp->if_softc;
  1416. uint32_t r;
  1417. int rx_npkts = 0;
  1418. NFE_LOCK(sc);
  1419. if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
  1420. NFE_UNLOCK(sc);
  1421. return (rx_npkts);
  1422. }
  1423. if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
  1424. rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
  1425. else
  1426. rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
  1427. nfe_txeof(sc);
  1428. if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
  1429. nfe_start_locked(ifp);
  1430. if (cmd == POLL_AND_CHECK_STATUS) {
  1431. if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
  1432. NFE_UNLOCK(sc);
  1433. return (rx_npkts);
  1434. }
  1435. NFE_WRITE(sc, sc->nfe_irq_status, r);
  1436. if (r & NFE_IRQ_LINK) {
  1437. NFE_READ(sc, NFE_PHY_STATUS);
  1438. NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
  1439. DPRINTF(sc, "link state changed\n");
  1440. }
  1441. }
  1442. NFE_UNLOCK(sc);
  1443. return (rx_npkts);
  1444. }
  1445. #endif /* DEVICE_POLLING */
  1446. static void
  1447. nfe_set_intr(struct nfe_softc *sc)
  1448. {
  1449. if (sc->nfe_msi != 0)
  1450. NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
  1451. }
  1452. /* In MSI-X, a write to the mask registers behaves as an XOR. */
  1453. static __inline void
  1454. nfe_enable_intr(struct nfe_softc *sc)
  1455. {
  1456. if (sc->nfe_msix != 0) {
  1457. /* XXX Should have a better way to enable interrupts! */
  1458. if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
  1459. NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
  1460. } else
  1461. NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
  1462. }
  1463. static __inline void
  1464. nfe_disable_intr(struct nfe_softc *sc)
  1465. {
  1466. if (sc->nfe_msix != 0) {
  1467. /* XXX Should have a better way to disable interrupts! */
  1468. if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
  1469. NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
  1470. } else
  1471. NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
  1472. }
  1473. static int
  1474. nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  1475. {
  1476. struct nfe_softc *sc;
  1477. struct ifreq *ifr;
  1478. struct mii_data *mii;
  1479. int error, init, mask;
  1480. sc = ifp->if_softc;
  1481. ifr = (struct ifreq *) data;
  1482. error = 0;
  1483. init = 0;
  1484. switch (cmd) {
  1485. case SIOCSIFMTU:
  1486. if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
  1487. error = EINVAL;
  1488. else if (ifp->if_mtu != ifr->ifr_mtu) {
  1489. if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
  1490. (sc->nfe_jumbo_disable != 0)) &&
  1491. ifr->ifr_mtu > ETHERMTU)
  1492. error = EINVAL;
  1493. else {
  1494. NFE_LOCK(sc);
  1495. ifp->if_mtu = ifr->ifr_mtu;
  1496. if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
  1497. ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  1498. nfe_init_locked(sc);
  1499. }
  1500. NFE_UNLOCK(sc);
  1501. }
  1502. }
  1503. break;
  1504. case SIOCSIFFLAGS:
  1505. NFE_LOCK(sc);
  1506. if (ifp->if_flags & IFF_UP) {
  1507. /*
  1508. * If only the PROMISC or ALLMULTI flag changes, then
  1509. * don't do a full re-init of the chip, just update
  1510. * the Rx filter.
  1511. */
  1512. if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
  1513. ((ifp->if_flags ^ sc->nfe_if_flags) &
  1514. (IFF_ALLMULTI | IFF_PROMISC)) != 0)
  1515. nfe_setmulti(sc);
  1516. else
  1517. nfe_init_locked(sc);
  1518. } else {
  1519. if (ifp->if_drv_flags & IFF_DRV_RUNNING)
  1520. nfe_stop(ifp);
  1521. }
  1522. sc->nfe_if_flags = ifp->if_flags;
  1523. NFE_UNLOCK(sc);
  1524. error = 0;
  1525. break;
  1526. case SIOCADDMULTI:
  1527. case SIOCDELMULTI:
  1528. if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
  1529. NFE_LOCK(sc);
  1530. nfe_setmulti(sc);
  1531. NFE_UNLOCK(sc);
  1532. error = 0;
  1533. }
  1534. break;
  1535. case SIOCSIFMEDIA:
  1536. case SIOCGIFMEDIA:
  1537. mii = device_get_softc(sc->nfe_miibus);
  1538. error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
  1539. break;
  1540. case SIOCSIFCAP:
  1541. mask = ifr->ifr_reqcap ^ ifp->if_capenable;
  1542. #ifdef DEVICE_POLLING
  1543. if ((mask & IFCAP_POLLING) != 0) {
  1544. if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
  1545. error = ether_poll_register(nfe_poll, ifp);
  1546. if (error)
  1547. break;
  1548. NFE_LOCK(sc);
  1549. nfe_disable_intr(sc);
  1550. ifp->if_capenable |= IFCAP_POLLING;
  1551. NFE_UNLOCK(sc);
  1552. } else {
  1553. error = ether_poll_deregister(ifp);
  1554. /* Enable interrupt even in error case */
  1555. NFE_LOCK(sc);
  1556. nfe_enable_intr(sc);
  1557. ifp->if_capenable &= ~IFCAP_POLLING;
  1558. NFE_UNLOCK(sc);
  1559. }
  1560. }
  1561. #endif /* DEVICE_POLLING */
  1562. if ((mask & IFCAP_WOL_MAGIC) != 0 &&
  1563. (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
  1564. ifp->if_capenable ^= IFCAP_WOL_MAGIC;
  1565. if ((mask & IFCAP_TXCSUM) != 0 &&
  1566. (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
  1567. ifp->if_capenable ^= IFCAP_TXCSUM;
  1568. if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
  1569. ifp->if_hwassist |= NFE_CSUM_FEATURES;
  1570. else
  1571. ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
  1572. }
  1573. if ((mask & IFCAP_RXCSUM) != 0 &&
  1574. (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
  1575. ifp->if_capenable ^= IFCAP_RXCSUM;
  1576. init++;
  1577. }
  1578. if ((mask & IFCAP_TSO4) != 0 &&
  1579. (ifp->if_capabilities & IFCAP_TSO4) != 0) {
  1580. ifp->if_capenable ^= IFCAP_TSO4;
  1581. if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
  1582. ifp->if_hwassist |= CSUM_TSO;
  1583. else
  1584. ifp->if_hwassist &= ~CSUM_TSO;
  1585. }
  1586. if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
  1587. (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
  1588. ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
  1589. if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
  1590. (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
  1591. ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
  1592. if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
  1593. ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
  1594. init++;
  1595. }
  1596. /*
  1597. * XXX
  1598. * It seems that VLAN stripping requires Rx checksum offload.
  1599. * Unfortunately FreeBSD has no way to disable only Rx side
  1600. * VLAN stripping. So when we know Rx checksum offload is
  1601. * disabled, turn the entire hardware VLAN assist off.
  1602. */
  1603. if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
  1604. if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
  1605. init++;
  1606. ifp->if_capenable &= ~(IFCAP_VLAN_HWTAGGING |
  1607. IFCAP_VLAN_HWTSO);
  1608. }
  1609. if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
  1610. ifp