/sys/dev/en/midway.c


/* $NetBSD: midway.c,v 1.30 1997/09/29 17:40:38 chuck Exp $ */
/* (sync'd to midway.c 1.68) */

/*-
 * Copyright (c) 1996 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * m i d w a y . c   e n i 1 5 5   d r i v e r
 *
 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
 * started: spring, 1996 (written from scratch).
 *
 * notes from the author:
 *   Extra special thanks go to Werner Almesberger, EPFL LRC. Werner's
 *   ENI driver was especially useful in figuring out how this card works.
 *   I would also like to thank Werner for promptly answering email and being
 *   generally helpful.
 */
#define EN_DIAG
#define EN_DDBHOOK      1       /* compile in ddb functions */

/*
 * Note on EN_ENIDMAFIX: the byte aligner on the ENI version of the card
 * appears to be broken. it works just fine if there is no load... however
 * when the card is loaded the data get corrupted. to see this, one only
 * has to use "telnet" over ATM. do the following command in "telnet":
 *      cat /usr/share/misc/termcap
 * "telnet" seems to generate lots of 1023 byte mbufs (which make great
 * use of the byte aligner). watch "netstat -s" for checksum errors.
 *
 * I further tested this by adding a function that compared the transmit
 * data on the card's SRAM with the data in the mbuf chain _after_ the
 * "transmit DMA complete" interrupt. using the "telnet" test I got data
 * mismatches where the byte-aligned data should have been. using ddb
 * and en_dumpmem() I verified that the DTQs fed into the card were
 * absolutely correct. thus, we are forced to conclude that the ENI
 * hardware is buggy. note that the Adaptec version of the card works
 * just fine with byte DMA.
 *
 * bottom line: we set EN_ENIDMAFIX to 1 to avoid byte DMAs on the ENI
 * card.
 */
#if defined(DIAGNOSTIC) && !defined(EN_DIAG)
#define EN_DIAG                 /* link in with master DIAG option */
#endif

#define EN_COUNT(X) (X)++

#ifdef EN_DEBUG

#undef  EN_DDBHOOK
#define EN_DDBHOOK      1

/*
 * This macro removes almost all the EN_DEBUG conditionals in the code that
 * make the code a good deal less readable.
 */
#define DBG(SC, FL, PRINT) do {                                 \
        if ((SC)->debug & DBG_##FL) {                           \
                device_printf((SC)->dev, "%s: "#FL": ", __func__); \
                printf PRINT;                                   \
                printf("\n");                                   \
        }                                                       \
    } while (0)

enum {
        DBG_INIT        = 0x0001,       /* debug attach/detach */
        DBG_TX          = 0x0002,       /* debug transmitting */
        DBG_SERV        = 0x0004,       /* debug service interrupts */
        DBG_IOCTL       = 0x0008,       /* debug ioctls */
        DBG_VC          = 0x0010,       /* debug VC handling */
        DBG_INTR        = 0x0020,       /* debug interrupts */
        DBG_DMA         = 0x0040,       /* debug DMA probing */
        DBG_IPACKETS    = 0x0080,       /* print input packets */
        DBG_REG         = 0x0100,       /* print all register access */
        DBG_LOCK        = 0x0200,       /* debug locking */
};

#else /* EN_DEBUG */

#define DBG(SC, FL, PRINT) do { } while (0)

#endif /* EN_DEBUG */
#include "opt_inet.h"
#include "opt_natm.h"
#include "opt_ddb.h"

#ifdef DDB
#undef  EN_DDBHOOK
#define EN_DDBHOOK      1
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/endian.h>
#include <sys/stdint.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_atm.h>

#if defined(NATM) || defined(INET) || defined(INET6)
#include <netinet/in.h>
#if defined(INET) || defined(INET6)
#include <netinet/if_atm.h>
#endif
#endif

#ifdef NATM
#include <netnatm/natm.h>
#endif

#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <machine/resource.h>
#include <dev/utopia/utopia.h>
#include <dev/en/midwayreg.h>
#include <dev/en/midwayvar.h>

#include <net/bpf.h>

/*
 * params
 */
#ifndef EN_TXHIWAT
#define EN_TXHIWAT      (64 * 1024)     /* max 64 KB waiting to be DMAd out */
#endif
SYSCTL_DECL(_hw_atm);

/*
 * dma tables
 *
 * The plan is indexed by the number of words to transfer.
 * The maximum index used is 15, i.e. 60 bytes.
 */
struct en_dmatab {
        uint8_t bcode;          /* code */
        uint8_t divshift;       /* byte divisor */
};

static const struct en_dmatab en_dmaplan[] = {
  { 0, 0 },             /* 0 */         { MIDDMA_WORD, 2},      /* 1 */
  { MIDDMA_2WORD, 3},   /* 2 */         { MIDDMA_WORD, 2},      /* 3 */
  { MIDDMA_4WORD, 4},   /* 4 */         { MIDDMA_WORD, 2},      /* 5 */
  { MIDDMA_2WORD, 3},   /* 6 */         { MIDDMA_WORD, 2},      /* 7 */
  { MIDDMA_8WORD, 5},   /* 8 */         { MIDDMA_WORD, 2},      /* 9 */
  { MIDDMA_2WORD, 3},   /* 10 */        { MIDDMA_WORD, 2},      /* 11 */
  { MIDDMA_4WORD, 4},   /* 12 */        { MIDDMA_WORD, 2},      /* 13 */
  { MIDDMA_2WORD, 3},   /* 14 */        { MIDDMA_WORD, 2},      /* 15 */
  { MIDDMA_16WORD,6},   /* 16 */
};
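/*
 * Example (illustrative only, not from the original source): a 24-byte
 * (6-word) remainder looks up en_dmaplan[6] = { MIDDMA_2WORD, 3 }, i.e.
 * a 2-word burst code with a count of 24 >> 3 = 3 bursts, exactly how
 * the DO_DTQ/DO_DRQ code below computes "bcode" and "count".
 */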
/*
 * prototypes
 */
#ifdef EN_DDBHOOK
int en_dump(int unit, int level);
int en_dumpmem(int, int, int);
#endif
static void en_close_finish(struct en_softc *sc, struct en_vcc *vc);

#define EN_LOCK(SC)     do {                            \
        DBG(SC, LOCK, ("ENLOCK %d\n", __LINE__));       \
        mtx_lock(&sc->en_mtx);                          \
    } while (0)
#define EN_UNLOCK(SC)   do {                            \
        DBG(SC, LOCK, ("ENUNLOCK %d\n", __LINE__));     \
        mtx_unlock(&sc->en_mtx);                        \
    } while (0)
#define EN_CHECKLOCK(sc)        mtx_assert(&sc->en_mtx, MA_OWNED)
/*
 * While a transmit mbuf is waiting to get transmit DMA resources we
 * need to keep some information with it. We don't want to allocate
 * additional memory for this so we stuff it into free fields in the
 * mbuf packet header. Neither the checksum fields nor the rcvif field
 * are used here, so we use those.
 */
#define TX_AAL5         0x1     /* transmit AAL5 PDU */
#define TX_HAS_TBD      0x2     /* TBD did fit into mbuf */
#define TX_HAS_PAD      0x4     /* padding did fit into mbuf */
#define TX_HAS_PDU      0x8     /* PDU trailer did fit into mbuf */

#define MBUF_SET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do {              \
        (M)->m_pkthdr.csum_data = (VCI) | ((FLAGS) << MID_VCI_BITS);    \
        (M)->m_pkthdr.csum_flags = ((DATALEN) & 0xffff) |               \
            ((PAD & 0x3f) << 16);                                       \
        (M)->m_pkthdr.rcvif = (void *)(MAP);                            \
    } while (0)

#define MBUF_GET_TX(M, VCI, FLAGS, DATALEN, PAD, MAP) do {              \
        (VCI) = (M)->m_pkthdr.csum_data & ((1 << MID_VCI_BITS) - 1);    \
        (FLAGS) = ((M)->m_pkthdr.csum_data >> MID_VCI_BITS) & 0xf;      \
        (DATALEN) = (M)->m_pkthdr.csum_flags & 0xffff;                  \
        (PAD) = ((M)->m_pkthdr.csum_flags >> 16) & 0x3f;                \
        (MAP) = (void *)((M)->m_pkthdr.rcvif);                         \
    } while (0)
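/*
 * Example (illustrative only, not from the original source): stashing
 * and later recovering the transmit state is a straight round-trip
 * through the header fields:
 *
 *      MBUF_SET_TX(m, vci, TX_AAL5, datalen, pad, map);
 *      ...
 *      MBUF_GET_TX(m, vci, flags, datalen, pad, map);
 *
 * VCI, flags, data length, pad bytes and the DMA map pointer all come
 * back unchanged, provided the VCI fits in MID_VCI_BITS bits, DATALEN
 * in 16 bits and PAD in 6 bits.
 */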
#define EN_WRAPADD(START, STOP, CUR, VAL) do {          \
        (CUR) = (CUR) + (VAL);                          \
        if ((CUR) >= (STOP))                            \
                (CUR) = (START) + ((CUR) - (STOP));     \
    } while (0)
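/*
 * Example (illustrative only, not from the original source): with
 * START = 0x100, STOP = 0x200 and cur = 0x1f8,
 * EN_WRAPADD(0x100, 0x200, cur, 0x10) first yields 0x208, which is
 * >= STOP, so cur wraps to 0x100 + 0x8 = 0x108. This only works while
 * VAL is no larger than the region size, which all callers guarantee.
 */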
#define WORD_IDX(START, X) (((X) - (START)) / sizeof(uint32_t))

#define SETQ_END(SC, VAL) ((SC)->is_adaptec ?           \
        ((VAL) | (MID_DMA_END >> 4)) :                  \
        ((VAL) | (MID_DMA_END)))
/*
 * The dtq and drq members are set for each END entry in the corresponding
 * card queue entry. They are used to find out when a buffer has finished
 * DMAing and can be freed.
 *
 * We store sc->dtq and sc->drq data in the following format...
 * the 0x80000 ensures the value is != 0.
 */
#define EN_DQ_MK(SLOT, LEN)     (((SLOT) << 20) | (LEN) | (0x80000))
#define EN_DQ_SLOT(X)           ((X) >> 20)
#define EN_DQ_LEN(X)            ((X) & 0x3ffff)
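/*
 * Example (illustrative only, not from the original source):
 * EN_DQ_MK(3, 1024) = (3 << 20) | 1024 | 0x80000 = 0x380400;
 * EN_DQ_SLOT() recovers 3 and EN_DQ_LEN() recovers 1024. Even a
 * zero-length "JK" entry for slot 0 is non-zero thanks to the 0x80000
 * marker bit, so a zero queue word can safely mean "no entry".
 */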
/*
 * Variables
 */
static uma_zone_t en_vcc_zone;

/***********************************************************************/

/*
 * en_read{x}: read a word from the card. These are the only functions
 * that read from the card.
 */
static __inline uint32_t
en_readx(struct en_softc *sc, uint32_t r)
{
        uint32_t v;

#ifdef EN_DIAG
        if (r > MID_MAXOFF || (r % 4))
                panic("en_read out of range, r=0x%x", r);
#endif
        v = bus_space_read_4(sc->en_memt, sc->en_base, r);
        return (v);
}
static __inline uint32_t
en_read(struct en_softc *sc, uint32_t r)
{
        uint32_t v;

#ifdef EN_DIAG
        if (r > MID_MAXOFF || (r % 4))
                panic("en_read out of range, r=0x%x", r);
#endif
        v = bus_space_read_4(sc->en_memt, sc->en_base, r);
        DBG(sc, REG, ("en_read(%#x) -> %08x", r, v));
        return (v);
}

/*
 * en_write: write a word to the card. This is the only function that
 * writes to the card.
 */
static __inline void
en_write(struct en_softc *sc, uint32_t r, uint32_t v)
{
#ifdef EN_DIAG
        if (r > MID_MAXOFF || (r % 4))
                panic("en_write out of range, r=0x%x", r);
#endif
        DBG(sc, REG, ("en_write(%#x) <- %08x", r, v));
        bus_space_write_4(sc->en_memt, sc->en_base, r, v);
}
/*
 * en_k2sz: convert KBytes to a size parameter (a log2)
 */
static __inline int
en_k2sz(int k)
{
        switch (k) {
        case 1:         return (0);
        case 2:         return (1);
        case 4:         return (2);
        case 8:         return (3);
        case 16:        return (4);
        case 32:        return (5);
        case 64:        return (6);
        case 128:       return (7);
        default:
                panic("en_k2sz");
        }
        return (0);
}
#define en_log2(X) en_k2sz(X)
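/*
 * Example (illustrative only, not from the original source):
 * en_k2sz(32) == 5, so a 32 KByte buffer gets size code log2(32) = 5;
 * en_log2() is the same table lookup reused for generic power-of-two
 * arguments.
 */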
/*
 * en_b2sz: convert a DMA burst code to its byte size
 */
static __inline int
en_b2sz(int b)
{
        switch (b) {
        case MIDDMA_WORD:       return (1*4);
        case MIDDMA_2WMAYBE:
        case MIDDMA_2WORD:      return (2*4);
        case MIDDMA_4WMAYBE:
        case MIDDMA_4WORD:      return (4*4);
        case MIDDMA_8WMAYBE:
        case MIDDMA_8WORD:      return (8*4);
        case MIDDMA_16WMAYBE:
        case MIDDMA_16WORD:     return (16*4);
        default:
                panic("en_b2sz");
        }
        return (0);
}

/*
 * en_sz2b: convert a burst size (bytes) to DMA burst code
 */
static __inline int
en_sz2b(int sz)
{
        switch (sz) {
        case 1*4:       return (MIDDMA_WORD);
        case 2*4:       return (MIDDMA_2WORD);
        case 4*4:       return (MIDDMA_4WORD);
        case 8*4:       return (MIDDMA_8WORD);
        case 16*4:      return (MIDDMA_16WORD);
        default:
                panic("en_sz2b");
        }
        return (0);
}
#ifdef EN_DEBUG
/*
 * Dump a packet
 */
static void
en_dump_packet(struct en_softc *sc, struct mbuf *m)
{
        int plen = m->m_pkthdr.len;
        u_int pos = 0;
        u_int totlen = 0;
        int len;
        u_char *ptr;

        device_printf(sc->dev, "packet len=%d", plen);
        while (m != NULL) {
                totlen += m->m_len;
                ptr = mtod(m, u_char *);
                for (len = 0; len < m->m_len; len++, pos++, ptr++) {
                        if (pos % 16 == 8)
                                printf(" ");
                        if (pos % 16 == 0)
                                printf("\n");
                        printf(" %02x", *ptr);
                }
                m = m->m_next;
        }
        printf("\n");
        if (totlen != plen)
                printf("sum of m_len=%u\n", totlen);
}
#endif
/*********************************************************************/
/*
 * DMA maps
 */

/*
 * Map constructor for a MAP.
 *
 * This is called each time a map is allocated
 * from the pool and about to be returned to the user. Here we actually
 * allocate the map if there isn't one. The problem is that we may fail
 * to allocate the DMA map yet have no means to signal this error. Therefore
 * when allocating a map, the caller must check that there is a map. An
 * additional problem is that i386 maps will be NULL, yet are ok and must
 * be freed, so let's use a flag to signal allocation.
 *
 * Caveat: we have no way to know that we are called from an interrupt context
 * here. We rely on the fact that bus_dmamap_create uses M_NOWAIT in all
 * its allocations.
 *
 * LOCK: any, not needed
 */
static int
en_map_ctor(void *mem, int size, void *arg, int flags)
{
        struct en_softc *sc = arg;
        struct en_map *map = mem;
        int err;

        err = bus_dmamap_create(sc->txtag, 0, &map->map);
        if (err != 0) {
                device_printf(sc->dev, "cannot create DMA map %d\n", err);
                return (err);
        }
        map->flags = ENMAP_ALLOC;
        map->sc = sc;
        return (0);
}

/*
 * Map destructor.
 *
 * Called when a map is disposed into the zone. If the map is loaded, unload
 * it.
 *
 * LOCK: any, not needed
 */
static void
en_map_dtor(void *mem, int size, void *arg)
{
        struct en_map *map = mem;

        if (map->flags & ENMAP_LOADED) {
                bus_dmamap_unload(map->sc->txtag, map->map);
                map->flags &= ~ENMAP_LOADED;
        }
}

/*
 * Map finalizer.
 *
 * This is called each time a map is returned from the zone to the system.
 * Get rid of the dmamap here.
 *
 * LOCK: any, not needed
 */
static void
en_map_fini(void *mem, int size)
{
        struct en_map *map = mem;

        bus_dmamap_destroy(map->sc->txtag, map->map);
}
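/*
 * Sketch (illustrative only; the real call lives in the attach code,
 * which is not part of this excerpt, and may differ in detail): the
 * three hooks above are expected to be wired into the map zone roughly
 * like
 *
 *      sc->map_zone = uma_zcreate("en dma maps", sizeof(struct en_map),
 *          en_map_ctor, en_map_dtor, NULL, en_map_fini,
 *          UMA_ALIGN_PTR, 0);
 *
 * after which uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT) hands out
 * ready-made maps as used in en_start() below.
 */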
/*********************************************************************/
/*
 * Transmission
 */

/*
 * Argument structure to load a transmit DMA map
 */
struct txarg {
        struct en_softc *sc;
        struct mbuf *m;
        u_int vci;
        u_int chan;             /* transmit channel */
        u_int datalen;          /* length of user data */
        u_int flags;
        u_int wait;             /* return: out of resources */
};

/*
 * TX DMA map loader helper. This function is the callback when the map
 * is loaded. It should fill the DMA segment descriptors into the hardware.
 *
 * LOCK: locked, needed
 */
static void
en_txdma_load(void *uarg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize,
    int error)
{
        struct txarg *tx = uarg;
        struct en_softc *sc = tx->sc;
        struct en_txslot *slot = &sc->txslot[tx->chan];
        uint32_t cur;           /* on-card buffer position (bytes offset) */
        uint32_t dtq;           /* on-card queue position (byte offset) */
        uint32_t last_dtq;      /* last DTQ we have written */
        uint32_t tmp;
        u_int free;             /* free queue entries on card */
        u_int needalign, cnt;
        bus_size_t rest;        /* remaining bytes in current segment */
        bus_addr_t addr;
        bus_dma_segment_t *s;
        uint32_t count, bcode;
        int i;

        if (error != 0)
                return;

        cur = slot->cur;
        dtq = sc->dtq_us;
        free = sc->dtq_free;

        last_dtq = 0;           /* make gcc happy */

        /*
         * Local macro to add an entry to the transmit DMA area. If there
         * are no entries left, return. Save the byte offset of the entry
         * in last_dtq for later use.
         */
#define PUT_DTQ_ENTRY(ENI, BCODE, COUNT, ADDR)                          \
        if (free == 0) {                                                \
                EN_COUNT(sc->stats.txdtqout);                           \
                tx->wait = 1;                                           \
                return;                                                 \
        }                                                               \
        last_dtq = dtq;                                                 \
        en_write(sc, dtq + 0, (ENI || !sc->is_adaptec) ?                \
            MID_MK_TXQ_ENI(COUNT, tx->chan, 0, BCODE) :                 \
            MID_MK_TXQ_ADP(COUNT, tx->chan, 0, BCODE));                 \
        en_write(sc, dtq + 4, ADDR);                                    \
                                                                        \
        EN_WRAPADD(MID_DTQOFF, MID_DTQEND, dtq, 8);                     \
        free--;

        /*
         * Local macro to generate a DMA entry to DMA cnt bytes. Updates
         * the current buffer byte offset accordingly.
         */
#define DO_DTQ(TYPE) do {                                               \
        rest -= cnt;                                                    \
        EN_WRAPADD(slot->start, slot->stop, cur, cnt);                  \
        DBG(sc, TX, ("tx%d: "TYPE" %u bytes, %ju left, cur %#x",        \
            tx->chan, cnt, (uintmax_t)rest, cur));                      \
                                                                        \
        PUT_DTQ_ENTRY(1, bcode, count, addr);                           \
                                                                        \
        addr += cnt;                                                    \
    } while (0)

        if (!(tx->flags & TX_HAS_TBD)) {
                /*
                 * Prepend the TBD - it did not fit into the first mbuf
                 */
                tmp = MID_TBD_MK1((tx->flags & TX_AAL5) ?
                    MID_TBD_AAL5 : MID_TBD_NOAAL5,
                    sc->vccs[tx->vci]->txspeed,
                    tx->m->m_pkthdr.len / MID_ATMDATASZ);
                en_write(sc, cur, tmp);
                EN_WRAPADD(slot->start, slot->stop, cur, 4);

                tmp = MID_TBD_MK2(tx->vci, 0, 0);
                en_write(sc, cur, tmp);
                EN_WRAPADD(slot->start, slot->stop, cur, 4);

                /* update DMA address */
                PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
        }

        for (i = 0, s = segs; i < nseg; i++, s++) {
                rest = s->ds_len;
                addr = s->ds_addr;

                if (sc->is_adaptec) {
                        /* adaptec card - simple */

                        /* advance the on-card buffer pointer */
                        EN_WRAPADD(slot->start, slot->stop, cur, rest);
                        DBG(sc, TX, ("tx%d: adp %ju bytes %#jx (cur now 0x%x)",
                            tx->chan, (uintmax_t)rest, (uintmax_t)addr, cur));

                        PUT_DTQ_ENTRY(0, 0, rest, addr);

                        continue;
                }

                /*
                 * do we need to do a DMA op to align to the maximum
                 * burst? Note that we are always 32-bit aligned.
                 */
                if (sc->alburst &&
                    (needalign = (addr & sc->bestburstmask)) != 0) {
                        /* compute number of bytes, words and code */
                        cnt = sc->bestburstlen - needalign;
                        if (cnt > rest)
                                cnt = rest;
                        count = cnt / sizeof(uint32_t);
                        if (sc->noalbursts) {
                                bcode = MIDDMA_WORD;
                        } else {
                                bcode = en_dmaplan[count].bcode;
                                count = cnt >> en_dmaplan[count].divshift;
                        }
                        DO_DTQ("al_dma");
                }

                /* do we need to do a max-sized burst? */
                if (rest >= sc->bestburstlen) {
                        count = rest >> sc->bestburstshift;
                        cnt = count << sc->bestburstshift;
                        bcode = sc->bestburstcode;
                        DO_DTQ("best_dma");
                }

                /* do we need to do a cleanup burst? */
                if (rest != 0) {
                        cnt = rest;
                        count = rest / sizeof(uint32_t);
                        if (sc->noalbursts) {
                                bcode = MIDDMA_WORD;
                        } else {
                                bcode = en_dmaplan[count].bcode;
                                count = cnt >> en_dmaplan[count].divshift;
                        }
                        DO_DTQ("clean_dma");
                }
        }

        KASSERT(tx->flags & TX_HAS_PAD, ("PDU not padded"));

        if ((tx->flags & TX_AAL5) && !(tx->flags & TX_HAS_PDU)) {
                /*
                 * Append the AAL5 PDU trailer
                 */
                tmp = MID_PDU_MK1(0, 0, tx->datalen);
                en_write(sc, cur, tmp);
                EN_WRAPADD(slot->start, slot->stop, cur, 4);

                en_write(sc, cur, 0);
                EN_WRAPADD(slot->start, slot->stop, cur, 4);

                /* update DMA address */
                PUT_DTQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
        }

        /* record the end for the interrupt routine */
        sc->dtq[MID_DTQ_A2REG(last_dtq)] =
            EN_DQ_MK(tx->chan, tx->m->m_pkthdr.len);

        /* set the end flag in the last descriptor */
        en_write(sc, last_dtq + 0, SETQ_END(sc, en_read(sc, last_dtq + 0)));

#undef PUT_DTQ_ENTRY
#undef DO_DTQ

        /* commit */
        slot->cur = cur;
        sc->dtq_free = free;
        sc->dtq_us = dtq;

        /* tell card */
        en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_us));
}
/*
 * en_txdma: start transmit DMA on the given channel, if possible
 *
 * This is called from two places: when we got new packets from the upper
 * layer or when we found that buffer space has freed up during interrupt
 * processing.
 *
 * LOCK: locked, needed
 */
static void
en_txdma(struct en_softc *sc, struct en_txslot *slot)
{
        struct en_map *map;
        struct mbuf *lastm;
        struct txarg tx;
        u_int pad;
        int error;

        DBG(sc, TX, ("tx%td: starting ...", slot - sc->txslot));
  again:
        bzero(&tx, sizeof(tx));
        tx.chan = slot - sc->txslot;
        tx.sc = sc;

        /*
         * get an mbuf waiting for DMA
         */
        _IF_DEQUEUE(&slot->q, tx.m);
        if (tx.m == NULL) {
                DBG(sc, TX, ("tx%td: ...done!", slot - sc->txslot));
                return;
        }
        MBUF_GET_TX(tx.m, tx.vci, tx.flags, tx.datalen, pad, map);

        /*
         * note: don't use the entire buffer space. if WRTX becomes equal
         * to RDTX, the transmitter stops assuming the buffer is empty! --kjc
         */
        if (tx.m->m_pkthdr.len >= slot->bfree) {
                EN_COUNT(sc->stats.txoutspace);
                DBG(sc, TX, ("tx%td: out of transmit space",
                    slot - sc->txslot));
                goto waitres;
        }

        lastm = NULL;
        if (!(tx.flags & TX_HAS_PAD)) {
                if (pad != 0) {
                        /* Append the padding buffer */
                        (void)m_length(tx.m, &lastm);
                        lastm->m_next = sc->padbuf;
                        sc->padbuf->m_len = pad;
                }
                tx.flags |= TX_HAS_PAD;
        }

        /*
         * Try to load that map
         */
        error = bus_dmamap_load_mbuf(sc->txtag, map->map, tx.m,
            en_txdma_load, &tx, BUS_DMA_NOWAIT);

        if (lastm != NULL)
                lastm->m_next = NULL;

        if (error != 0) {
                device_printf(sc->dev, "loading TX map failed %d\n",
                    error);
                goto dequeue_drop;
        }
        map->flags |= ENMAP_LOADED;
        if (tx.wait) {
                /* probably not enough space */
                bus_dmamap_unload(map->sc->txtag, map->map);
                map->flags &= ~ENMAP_LOADED;

                sc->need_dtqs = 1;
                DBG(sc, TX, ("tx%td: out of transmit DTQs",
                    slot - sc->txslot));
                goto waitres;
        }

        EN_COUNT(sc->stats.launch);
        sc->ifp->if_opackets++;

        sc->vccs[tx.vci]->opackets++;
        sc->vccs[tx.vci]->obytes += tx.datalen;

#ifdef ENABLE_BPF
        if (bpf_peers_present(sc->ifp->if_bpf)) {
                /*
                 * adjust the top of the mbuf to skip the TBD if present
                 * before passing the packet to bpf.
                 * Also remove padding and the PDU trailer. Assume both of
                 * them to be in the same mbuf. pktlen, m_len and m_data
                 * are not needed anymore so we can change them.
                 */
                if (tx.flags & TX_HAS_TBD) {
                        tx.m->m_data += MID_TBD_SIZE;
                        tx.m->m_len -= MID_TBD_SIZE;
                }
                tx.m->m_pkthdr.len = m_length(tx.m, &lastm);
                if (tx.m->m_pkthdr.len > tx.datalen) {
                        lastm->m_len -= tx.m->m_pkthdr.len - tx.datalen;
                        tx.m->m_pkthdr.len = tx.datalen;
                }

                bpf_mtap(sc->ifp->if_bpf, tx.m);
        }
#endif

        /*
         * do some housekeeping and get the next packet
         */
        slot->bfree -= tx.m->m_pkthdr.len;
        _IF_ENQUEUE(&slot->indma, tx.m);

        goto again;

        /*
         * error handling. This is jumped to when we just want to drop
         * the packet. Must be unlocked here.
         */
  dequeue_drop:
        if (map != NULL)
                uma_zfree(sc->map_zone, map);

        slot->mbsize -= tx.m->m_pkthdr.len;

        m_freem(tx.m);

        goto again;

  waitres:
        _IF_PREPEND(&slot->q, tx.m);
}
/*
 * Create a copy of a single mbuf. It can have either internal or
 * external data, it may have a packet header. External data is really
 * copied, so the new buffer is writeable.
 *
 * LOCK: any, not needed
 */
static struct mbuf *
copy_mbuf(struct mbuf *m)
{
        struct mbuf *new;

        MGET(new, M_WAIT, MT_DATA);

        if (m->m_flags & M_PKTHDR) {
                M_MOVE_PKTHDR(new, m);
                if (m->m_len > MHLEN)
                        MCLGET(new, M_WAIT);
        } else {
                if (m->m_len > MLEN)
                        MCLGET(new, M_WAIT);
        }
        bcopy(m->m_data, new->m_data, m->m_len);
        new->m_len = m->m_len;
        new->m_flags &= ~M_RDONLY;
        return (new);
}
/*
 * This function is called when we have an ENI adapter. It fixes the
 * mbuf chain so that all addresses and lengths are 4 byte aligned.
 * The overall length is already padded to a multiple of cells plus the
 * TBD, so that part must always succeed. The routine can still fail
 * when it needs to copy an mbuf (this may happen if an mbuf is readonly).
 *
 * We assume here that aligning the virtual addresses to 4 bytes also
 * aligns the physical addresses.
 *
 * LOCK: locked, needed
 */
static struct mbuf *
en_fix_mchain(struct en_softc *sc, struct mbuf *m0, u_int *pad)
{
        struct mbuf **prev = &m0;
        struct mbuf *m = m0;
        struct mbuf *new;
        u_char *d;
        int off;

        while (m != NULL) {
                d = mtod(m, u_char *);
                if ((off = (uintptr_t)d % sizeof(uint32_t)) != 0) {
                        EN_COUNT(sc->stats.mfixaddr);
                        if (M_WRITABLE(m)) {
                                bcopy(d, d - off, m->m_len);
                                m->m_data -= off;
                        } else {
                                if ((new = copy_mbuf(m)) == NULL) {
                                        EN_COUNT(sc->stats.mfixfail);
                                        m_freem(m0);
                                        return (NULL);
                                }
                                new->m_next = m_free(m);
                                *prev = m = new;
                        }
                }

                if ((off = m->m_len % sizeof(uint32_t)) != 0) {
                        EN_COUNT(sc->stats.mfixlen);
                        if (!M_WRITABLE(m)) {
                                if ((new = copy_mbuf(m)) == NULL) {
                                        EN_COUNT(sc->stats.mfixfail);
                                        m_freem(m0);
                                        return (NULL);
                                }
                                new->m_next = m_free(m);
                                *prev = m = new;
                        }
                        d = mtod(m, u_char *) + m->m_len;
                        off = 4 - off;
                        while (off) {
                                while (m->m_next && m->m_next->m_len == 0)
                                        m->m_next = m_free(m->m_next);

                                if (m->m_next == NULL) {
                                        *d++ = 0;
                                        KASSERT(*pad > 0,
                                            ("no padding space"));
                                        (*pad)--;
                                } else {
                                        *d++ = *mtod(m->m_next, u_char *);
                                        m->m_next->m_len--;
                                        m->m_next->m_data++;
                                }
                                m->m_len++;
                                off--;
                        }
                }

                prev = &m->m_next;
                m = m->m_next;
        }

        return (m0);
}
/*
 * en_start: start transmitting the next packet that needs to go out
 * if there is one. We take off all packets from the interface's queue and
 * put them into the channel's queue.
 *
 * Here we also prepend the transmit packet descriptor and append the padding
 * and (for AAL5) the PDU trailer. This is different from the original driver:
 * we assume that allocating one or two additional mbufs is actually cheaper
 * than all the algorithmic fiddling we would need otherwise.
 *
 * While the packet is on the channel's wait queue we use the csum_* fields
 * in the packet header to hold the original datalen, the AAL5 flag and the
 * VCI. The packet length field in the header holds the needed buffer space.
 * This may actually be more than the length of the current mbuf chain (when
 * one or more of TBD, padding and PDU do not fit).
 *
 * LOCK: unlocked, needed
 */
static void
en_start(struct ifnet *ifp)
{
        struct en_softc *sc = (struct en_softc *)ifp->if_softc;
        struct mbuf *m, *lastm;
        struct atm_pseudohdr *ap;
        u_int pad;              /* 0-bytes to pad at PDU end */
        u_int datalen;          /* length of user data */
        u_int vci;              /* the VCI we are transmitting on */
        u_int flags;
        uint32_t tbd[2];
        uint32_t pdu[2];
        struct en_vcc *vc;
        struct en_map *map;
        struct en_txslot *tx;

        while (1) {
                IF_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        return;

                flags = 0;

                ap = mtod(m, struct atm_pseudohdr *);
                vci = ATM_PH_VCI(ap);

                if (ATM_PH_VPI(ap) != 0 || vci >= MID_N_VC ||
                    (vc = sc->vccs[vci]) == NULL ||
                    (vc->vflags & VCC_CLOSE_RX)) {
                        DBG(sc, TX, ("output vpi=%u, vci=%u -- drop",
                            ATM_PH_VPI(ap), vci));
                        m_freem(m);
                        continue;
                }
                if (vc->vcc.aal == ATMIO_AAL_5)
                        flags |= TX_AAL5;
                m_adj(m, sizeof(struct atm_pseudohdr));

                /*
                 * (re-)calculate size of packet (in bytes)
                 */
                m->m_pkthdr.len = datalen = m_length(m, &lastm);

                /*
                 * compute how much padding we need on the end of the mbuf,
                 * then see if we can put the TBD at the front of the mbuf
                 * where the link header goes (well behaved protocols will
                 * reserve room for us). Last, check if there is room for
                 * the PDU tail.
                 */
                if (flags & TX_AAL5)
                        m->m_pkthdr.len += MID_PDU_SIZE;
                m->m_pkthdr.len = roundup(m->m_pkthdr.len, MID_ATMDATASZ);
                pad = m->m_pkthdr.len - datalen;
                if (flags & TX_AAL5)
                        pad -= MID_PDU_SIZE;
                m->m_pkthdr.len += MID_TBD_SIZE;

                DBG(sc, TX, ("txvci%d: buflen=%u datalen=%u lead=%d trail=%d",
                    vci, m->m_pkthdr.len, datalen, (int)M_LEADINGSPACE(m),
                    (int)M_TRAILINGSPACE(lastm)));

                /*
                 * From here on we need access to sc
                 */
                EN_LOCK(sc);

                /*
                 * Allocate a map. We do this here rather than in en_txdma,
                 * because en_txdma is also called from the interrupt handler
                 * and we are going to have a locking problem then. We must
                 * use NOWAIT here, because the ip_output path holds various
                 * locks.
                 */
                map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
                if (map == NULL) {
                        /* drop that packet */
                        EN_COUNT(sc->stats.txnomap);
                        EN_UNLOCK(sc);
                        m_freem(m);
                        continue;
                }

                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                        EN_UNLOCK(sc);
                        uma_zfree(sc->map_zone, map);
                        m_freem(m);
                        continue;
                }

                /*
                 * See whether we can prepend the TBD (8 bytes)
                 */
                if (M_WRITABLE(m) && M_LEADINGSPACE(m) >= MID_TBD_SIZE) {
                        tbd[0] = htobe32(MID_TBD_MK1((flags & TX_AAL5) ?
                            MID_TBD_AAL5 : MID_TBD_NOAAL5,
                            vc->txspeed, m->m_pkthdr.len / MID_ATMDATASZ));
                        tbd[1] = htobe32(MID_TBD_MK2(vci, 0, 0));

                        m->m_data -= MID_TBD_SIZE;
                        bcopy(tbd, m->m_data, MID_TBD_SIZE);
                        m->m_len += MID_TBD_SIZE;
                        flags |= TX_HAS_TBD;
                }

                /*
                 * Check whether the padding fits (must be writeable -
                 * we pad with zero).
                 */
                if (M_WRITABLE(lastm) && M_TRAILINGSPACE(lastm) >= pad) {
                        bzero(lastm->m_data + lastm->m_len, pad);
                        lastm->m_len += pad;
                        flags |= TX_HAS_PAD;

                        if ((flags & TX_AAL5) &&
                            M_TRAILINGSPACE(lastm) > MID_PDU_SIZE) {
                                pdu[0] = htobe32(MID_PDU_MK1(0, 0, datalen));
                                pdu[1] = 0;
                                bcopy(pdu, lastm->m_data + lastm->m_len,
                                    MID_PDU_SIZE);
                                lastm->m_len += MID_PDU_SIZE;
                                flags |= TX_HAS_PDU;
                        }
                }

                if (!sc->is_adaptec &&
                    (m = en_fix_mchain(sc, m, &pad)) == NULL) {
                        EN_UNLOCK(sc);
                        uma_zfree(sc->map_zone, map);
                        continue;
                }

                /*
                 * get assigned channel (will be zero unless txspeed is set)
                 */
                tx = vc->txslot;

                if (m->m_pkthdr.len > EN_TXSZ * 1024) {
                        DBG(sc, TX, ("tx%td: packet larger than xmit buffer "
                            "(%d > %d)\n", tx - sc->txslot, m->m_pkthdr.len,
                            EN_TXSZ * 1024));
                        EN_UNLOCK(sc);
                        m_freem(m);
                        uma_zfree(sc->map_zone, map);
                        continue;
                }

                if (tx->mbsize > EN_TXHIWAT) {
                        EN_COUNT(sc->stats.txmbovr);
                        DBG(sc, TX, ("tx%td: buffer space shortage",
                            tx - sc->txslot));
                        EN_UNLOCK(sc);
                        m_freem(m);
                        uma_zfree(sc->map_zone, map);
                        continue;
                }

                /* commit */
                tx->mbsize += m->m_pkthdr.len;

                DBG(sc, TX, ("tx%td: VCI=%d, speed=0x%x, buflen=%d, mbsize=%d",
                    tx - sc->txslot, vci, sc->vccs[vci]->txspeed,
                    m->m_pkthdr.len, tx->mbsize));

                MBUF_SET_TX(m, vci, flags, datalen, pad, map);

                _IF_ENQUEUE(&tx->q, m);

                en_txdma(sc, tx);

                EN_UNLOCK(sc);
        }
}
/*********************************************************************/
/*
 * VCs
 */

/*
 * en_loadvc: load a vc tab entry from a slot
 *
 * LOCK: locked, needed
 */
static void
en_loadvc(struct en_softc *sc, struct en_vcc *vc)
{
        uint32_t reg = en_read(sc, MID_VC(vc->vcc.vci));

        reg = MIDV_SETMODE(reg, MIDV_TRASH);
        en_write(sc, MID_VC(vc->vcc.vci), reg);
        DELAY(27);

        /* no need to set CRC */

        /* read pointer = 0, desc. start = 0 */
        en_write(sc, MID_DST_RP(vc->vcc.vci), 0);
        /* write pointer = 0 */
        en_write(sc, MID_WP_ST_CNT(vc->vcc.vci), 0);
        /* set mode, size, loc */
        en_write(sc, MID_VC(vc->vcc.vci), vc->rxslot->mode);

        vc->rxslot->cur = vc->rxslot->start;

        DBG(sc, VC, ("rx%td: assigned to VCI %d", vc->rxslot - sc->rxslot,
            vc->vcc.vci));
}
/*
 * Open the given vcc.
 *
 * LOCK: unlocked, needed
 */
static int
en_open_vcc(struct en_softc *sc, struct atmio_openvcc *op)
{
        uint32_t oldmode, newmode;
        struct en_rxslot *slot;
        struct en_vcc *vc;
        int error = 0;

        DBG(sc, IOCTL, ("enable vpi=%d, vci=%d, flags=%#x",
            op->param.vpi, op->param.vci, op->param.flags));

        if (op->param.vpi != 0 || op->param.vci >= MID_N_VC)
                return (EINVAL);

        vc = uma_zalloc(en_vcc_zone, M_NOWAIT | M_ZERO);
        if (vc == NULL)
                return (ENOMEM);

        EN_LOCK(sc);

        if (sc->vccs[op->param.vci] != NULL) {
                error = EBUSY;
                goto done;
        }

        /* find a free receive slot */
        for (slot = sc->rxslot; slot < &sc->rxslot[sc->en_nrx]; slot++)
                if (slot->vcc == NULL)
                        break;
        if (slot == &sc->rxslot[sc->en_nrx]) {
                error = ENOSPC;
                goto done;
        }

        vc->rxslot = slot;
        vc->rxhand = op->rxhand;
        vc->vcc = op->param;

        oldmode = slot->mode;
        newmode = (op->param.aal == ATMIO_AAL_5) ? MIDV_AAL5 : MIDV_NOAAL;
        slot->mode = MIDV_SETMODE(oldmode, newmode);
        slot->vcc = vc;

        KASSERT(_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0,
            ("en_rxctl: left over mbufs on enable slot=%td",
            vc->rxslot - sc->rxslot));

        vc->txspeed = 0;
        vc->txslot = sc->txslot;
        vc->txslot->nref++;     /* bump reference count */

        en_loadvc(sc, vc);      /* does debug printf for us */

        /* don't free below */
        sc->vccs[vc->vcc.vci] = vc;
        vc = NULL;
        sc->vccs_open++;

  done:
        if (vc != NULL)
                uma_zfree(en_vcc_zone, vc);
        EN_UNLOCK(sc);
        return (error);
}
/*
 * Close finished
 */
static void
en_close_finish(struct en_softc *sc, struct en_vcc *vc)
{

        if (vc->rxslot != NULL)
                vc->rxslot->vcc = NULL;

        DBG(sc, VC, ("vci: %u free (%p)", vc->vcc.vci, vc));

        sc->vccs[vc->vcc.vci] = NULL;
        uma_zfree(en_vcc_zone, vc);
        sc->vccs_open--;
}
/*
 * LOCK: unlocked, needed
 */
static int
en_close_vcc(struct en_softc *sc, struct atmio_closevcc *cl)
{
        uint32_t oldmode, newmode;
        struct en_vcc *vc;
        int error = 0;

        DBG(sc, IOCTL, ("disable vpi=%d, vci=%d", cl->vpi, cl->vci));

        if (cl->vpi != 0 || cl->vci >= MID_N_VC)
                return (EINVAL);

        EN_LOCK(sc);
        if ((vc = sc->vccs[cl->vci]) == NULL) {
                error = ENOTCONN;
                goto done;
        }

        /*
         * turn off VCI
         */
        if (vc->rxslot == NULL) {
                error = ENOTCONN;
                goto done;
        }
        if (vc->vflags & VCC_DRAIN) {
                error = EINVAL;
                goto done;
        }

        oldmode = en_read(sc, MID_VC(cl->vci));
        newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
        en_write(sc, MID_VC(cl->vci), (newmode | (oldmode & MIDV_INSERVICE)));

        /* halt in tracks, be careful to preserve inservice bit */
        DELAY(27);
        vc->rxslot->mode = newmode;

        vc->txslot->nref--;

        /* if stuff is still going on we are going to have to drain it out */
        if (_IF_QLEN(&vc->rxslot->indma) == 0 &&
            _IF_QLEN(&vc->rxslot->q) == 0 &&
            (vc->vflags & VCC_SWSL) == 0) {
                en_close_finish(sc, vc);
                goto done;
        }

        vc->vflags |= VCC_DRAIN;
        DBG(sc, IOCTL, ("VCI %u now draining", cl->vci));

        if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
                goto done;

        vc->vflags |= VCC_CLOSE_RX;
        while ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) &&
            (vc->vflags & VCC_DRAIN))
                cv_wait(&sc->cv_close, &sc->en_mtx);

        en_close_finish(sc, vc);
        if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                error = EIO;
                goto done;
        }

  done:
        EN_UNLOCK(sc);
        return (error);
}
/*********************************************************************/
/*
 * starting/stopping the card
 */

/*
 * en_reset_ul: reset the board, throw away work in progress.
 * must en_init to recover.
 *
 * LOCK: locked, needed
 */
static void
en_reset_ul(struct en_softc *sc)
{
        struct en_map *map;
        struct mbuf *m;
        struct en_rxslot *rx;
        int lcv;

        device_printf(sc->dev, "reset\n");
        sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

        if (sc->en_busreset)
                sc->en_busreset(sc);
        en_write(sc, MID_RESID, 0x0);   /* reset hardware */

        /*
         * recv: dump any mbufs we are dma'ing into, if DRAINing, then a reset
         * will free us! Don't release the rxslot from the channel.
         */
        for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
                if (sc->vccs[lcv] == NULL)
                        continue;
                rx = sc->vccs[lcv]->rxslot;

                for (;;) {
                        _IF_DEQUEUE(&rx->indma, m);
                        if (m == NULL)
                                break;
                        map = (void *)m->m_pkthdr.rcvif;
                        uma_zfree(sc->map_zone, map);
                        m_freem(m);
                }
                for (;;) {
                        _IF_DEQUEUE(&rx->q, m);
                        if (m == NULL)
                                break;
                        m_freem(m);
                }
                sc->vccs[lcv]->vflags = 0;
        }

        /*
         * xmit: dump everything
         */
        for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
                for (;;) {
                        _IF_DEQUEUE(&sc->txslot[lcv].indma, m);
                        if (m == NULL)
                                break;
                        map = (void *)m->m_pkthdr.rcvif;
                        uma_zfree(sc->map_zone, map);
                        m_freem(m);
                }
                for (;;) {
                        _IF_DEQUEUE(&sc->txslot[lcv].q, m);
                        if (m == NULL)
                                break;
                        map = (void *)m->m_pkthdr.rcvif;
                        uma_zfree(sc->map_zone, map);
                        m_freem(m);
                }
                sc->txslot[lcv].mbsize = 0;
        }

        /*
         * Unstop all waiters
         */
        cv_broadcast(&sc->cv_close);
}

/*
 * en_reset: reset the board, throw away work in progress.
 * must en_init to recover.
 *
 * LOCK: unlocked, needed
 *
 * Use en_reset_ul if you already have the lock
 */
void
en_reset(struct en_softc *sc)
{
        EN_LOCK(sc);
        en_reset_ul(sc);
        EN_UNLOCK(sc);
}
/*
 * en_init: init board and sync the card with the data in the softc.
 *
 * LOCK: locked, needed
 */
static void
en_init(struct en_softc *sc)
{
        int vc, slot;
        uint32_t loc;

        if ((sc->ifp->if_flags & IFF_UP) == 0) {
                DBG(sc, INIT, ("going down"));
                en_reset(sc);   /* to be safe */
                return;
        }

        DBG(sc, INIT, ("going up"));
        sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;       /* enable */

        if (sc->en_busreset)
                sc->en_busreset(sc);
        en_write(sc, MID_RESID, 0x0);           /* reset */

        /* zero memory */
        bus_space_set_region_4(sc->en_memt, sc->en_base,
            MID_RAMOFF, 0, sc->en_obmemsz / 4);

        /*
         * init obmem data structures: vc tab, dma q's, slist.
         *
         * note that we set drq_free/dtq_free to one less than the total number
         * of DTQ/DRQs present. we do this because the card uses the condition
         * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
         * circular list to be completely full then (drq_chip == drq_us) [i.e.
         * the drq_us pointer will wrap all the way around]. by restricting
         * the number of active requests to (N - 1) we prevent the list from
         * becoming completely full. note that the card will sometimes give
         * us an interrupt for a DTQ/DRQ we have already processed... this
         * helps keep that interrupt from messing us up.
         */
        bzero(&sc->drq, sizeof(sc->drq));
        sc->drq_free = MID_DRQ_N - 1;
        sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
        en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
        sc->drq_us = sc->drq_chip;

        bzero(&sc->dtq, sizeof(sc->dtq));
        sc->dtq_free = MID_DTQ_N - 1;
        sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
        en_write(sc, MID_DMA_WRTX, MID_DRQ_A2REG(sc->dtq_chip));
        sc->dtq_us = sc->dtq_chip;

        sc->hwslistp = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));
        sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;

        DBG(sc, INIT, ("drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, "
            "hwslist: 0x%x", sc->drq_free, sc->drq_chip, sc->dtq_free,
            sc->dtq_chip, sc->hwslistp));

        for (slot = 0 ; slot < EN_NTX ; slot++) {
                sc->txslot[slot].bfree = EN_TXSZ * 1024;
                en_write(sc, MIDX_READPTR(slot), 0);
                en_write(sc, MIDX_DESCSTART(slot), 0);
                loc = sc->txslot[slot].cur = sc->txslot[slot].start;
                loc = loc - MID_RAMOFF;
                /* mask, cvt to words */
                loc = (loc & ~((EN_TXSZ * 1024) - 1)) >> 2;
                /* top 11 bits */
                loc = loc >> MIDV_LOCTOPSHFT;
                en_write(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ),
                    loc));
                DBG(sc, INIT, ("tx%d: place 0x%x", slot,
                    (u_int)en_read(sc, MIDX_PLACE(slot))));
        }

        for (vc = 0; vc < MID_N_VC; vc++)
                if (sc->vccs[vc] != NULL)
                        en_loadvc(sc, sc->vccs[vc]);

        /*
         * enable!
         */
        en_write(sc, MID_INTENA, MID_INT_TX | MID_INT_DMA_OVR | MID_INT_IDENT |
            MID_INT_LERR | MID_INT_DMA_ERR | MID_INT_DMA_RX | MID_INT_DMA_TX |
            MID_INT_SERVICE | MID_INT_SUNI | MID_INT_STATS);
        en_write(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl) | MID_MCSR_ENDMA |
            MID_MCSR_ENTX | MID_MCSR_ENRX);
}
/*********************************************************************/
/*
 * Ioctls
 */

/*
 * en_ioctl: handle ioctl requests
 *
 * NOTE: if you add an ioctl to set txspeed, you should choose a new
 * TX channel/slot. Choose the one with the lowest sc->txslot[slot].nref
 * value, subtract one from sc->txslot[0].nref, add one to the
 * sc->txslot[slot].nref, set sc->txvc2slot[vci] = slot, and then set
 * txspeed[vci].
 *
 * LOCK: unlocked, needed
 */
static int
en_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct en_softc *sc = (struct en_softc *)ifp->if_softc;
#if defined(INET) || defined(INET6)
        struct ifaddr *ifa = (struct ifaddr *)data;
#endif
        struct ifreq *ifr = (struct ifreq *)data;
        struct atmio_vcctable *vtab;
        int error = 0;

        switch (cmd) {

        case SIOCSIFADDR:
                EN_LOCK(sc);
                ifp->if_flags |= IFF_UP;
#if defined(INET) || defined(INET6)
                if (ifa->ifa_addr->sa_family == AF_INET
                    || ifa->ifa_addr->sa_family == AF_INET6) {
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                en_reset_ul(sc);
                                en_init(sc);
                        }
                        ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
                        EN_UNLOCK(sc);
                        break;
                }
#endif /* INET */
                if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                        en_reset_ul(sc);
                        en_init(sc);
                }
                EN_UNLOCK(sc);
                break;

        case SIOCSIFFLAGS:
                EN_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                en_init(sc);
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                en_reset_ul(sc);
                }
                EN_UNLOCK(sc);
                break;

        case SIOCSIFMTU:
                /*
                 * Set the interface MTU.
                 */
                if (ifr->ifr_mtu > ATMMTU) {
                        error = EINVAL;
                        break;
                }
                ifp->if_mtu = ifr->ifr_mtu;
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
                break;

        case SIOCATMOPENVCC:            /* kernel internal use */
                error = en_open_vcc(sc, (struct atmio_openvcc *)data);
                break;

        case SIOCATMCLOSEVCC:           /* kernel internal use */
                error = en_close_vcc(sc, (struct atmio_closevcc *)data);
                break;

        case SIOCATMGETVCCS:            /* internal netgraph use */
                vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
                    MID_N_VC, sc->vccs_open, &sc->en_mtx, 0);
                if (vtab == NULL) {
                        error = ENOMEM;
                        break;
                }
                *(void **)data = vtab;
                break;

        case SIOCATMGVCCS:              /* return vcc table */
                vtab = atm_getvccs((struct atmio_vcc **)sc->vccs,
                    MID_N_VC, sc->vccs_open, &sc->en_mtx, 1);
                error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) +
                    vtab->count * sizeof(vtab->vccs[0]));
                free(vtab, M_DEVBUF);
                break;

        default:
                error = EINVAL;
                break;
        }
        return (error);
}
/*********************************************************************/
/*
 * Sysctl's
 */

/*
 * Sysctl handler for internal statistics
 *
 * LOCK: unlocked, needed
 */
static int
en_sysctl_istats(SYSCTL_HANDLER_ARGS)
{
        struct en_softc *sc = arg1;
        uint32_t *ret;
        int error;

        ret = malloc(sizeof(sc->stats), M_TEMP, M_WAITOK);

        EN_LOCK(sc);
        bcopy(&sc->stats, ret, sizeof(sc->stats));
        EN_UNLOCK(sc);

        error = SYSCTL_OUT(req, ret, sizeof(sc->stats));
        free(ret, M_TEMP);

        return (error);
}
/*********************************************************************/
/*
 * Interrupts
 */

/*
 * Transmit interrupt handler
 *
 * check for tx complete, if detected then this means that some space
 * has come free on the card. we must account for it and arrange to
 * kick the channel to life (in case it is stalled waiting on the card).
 *
 * LOCK: locked, needed
 */
static uint32_t
en_intr_tx(struct en_softc *sc, uint32_t reg)
{
        uint32_t kick;
        uint32_t mask;
        uint32_t val;
        int chan;

        kick = 0;               /* bitmask of channels to kick */

        for (mask = 1, chan = 0; chan < EN_NTX; chan++, mask *= 2) {
                if (!(reg & MID_TXCHAN(chan)))
                        continue;

                kick = kick | mask;

                /* current read pointer */
                val = en_read(sc, MIDX_READPTR(chan));
                /* as offset */
                val = (val * sizeof(uint32_t)) + sc->txslot[chan].start;
                if (val > sc->txslot[chan].cur)
                        sc->txslot[chan].bfree = val - sc->txslot[chan].cur;
                else
                        sc->txslot[chan].bfree = (val + (EN_TXSZ * 1024)) -
                            sc->txslot[chan].cur;
                DBG(sc, INTR, ("tx%d: transmit done. %d bytes now free in "
                    "buffer", chan, sc->txslot[chan].bfree));
        }
        return (kick);
}
/*
 * TX DMA interrupt
 *
 * check for TX DMA complete, if detected then this means
 * that some DTQs are now free. it also means some indma
 * mbufs can be freed. if we needed DTQs, kick all channels.
 *
 * LOCK: locked, needed
 */
static uint32_t
en_intr_tx_dma(struct en_softc *sc)
{
        uint32_t kick = 0;
        uint32_t val;
        uint32_t idx;
        uint32_t slot;
        uint32_t dtq;
        struct en_map *map;
        struct mbuf *m;

        val = en_read(sc, MID_DMA_RDTX);        /* chip's current location */
        idx = MID_DTQ_A2REG(sc->dtq_chip);      /* where we last saw chip */

        if (sc->need_dtqs) {
                kick = MID_NTX_CH - 1;  /* assume power of 2, kick all! */
                sc->need_dtqs = 0;      /* recalculated in "kick" loop below */
                DBG(sc, INTR, ("cleared need DTQ condition"));
        }

        while (idx != val) {
                sc->dtq_free++;
                if ((dtq = sc->dtq[idx]) != 0) {
                        /* don't forget to zero it out when done */
                        sc->dtq[idx] = 0;
                        slot = EN_DQ_SLOT(dtq);

                        _IF_DEQUEUE(&sc->txslot[slot].indma, m);
                        if (m == NULL)
                                panic("enintr: dtqsync");
                        map = (void *)m->m_pkthdr.rcvif;
                        uma_zfree(sc->map_zone, map);
                        m_freem(m);

                        sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
                        DBG(sc, INTR, ("tx%d: free %d dma bytes, mbsize now "
                            "%d", slot, EN_DQ_LEN(dtq),
                            sc->txslot[slot].mbsize));
                }
                EN_WRAPADD(0, MID_DTQ_N, idx, 1);
        }
        sc->dtq_chip = MID_DTQ_REG2A(val);      /* sync softc */

        return (kick);
}
/*
 * Service interrupt
 *
 * LOCK: locked, needed
 */
static int
en_intr_service(struct en_softc *sc)
{
        uint32_t chip;
        uint32_t vci;
        int need_softserv = 0;
        struct en_vcc *vc;

        chip = MID_SL_REG2A(en_read(sc, MID_SERV_WRITE));

        while (sc->hwslistp != chip) {
                /* fetch and remove it from hardware service list */
                vci = en_read(sc, sc->hwslistp);
                EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);

                if ((vc = sc->vccs[vci]) == NULL ||
                    (vc->vcc.flags & ATMIO_FLAG_NORX)) {
                        DBG(sc, INTR, ("unexpected rx interrupt VCI %d", vci));
                        en_write(sc, MID_VC(vci), MIDV_TRASH); /* rx off */
                        continue;
                }

                /* remove from hwsl */
                en_write(sc, MID_VC(vci), vc->rxslot->mode);
                EN_COUNT(sc->stats.hwpull);

                DBG(sc, INTR, ("pulled VCI %d off hwslist", vci));

                /* add it to the software service list (if needed) */
                if ((vc->vflags & VCC_SWSL) == 0) {
                        EN_COUNT(sc->stats.swadd);
                        need_softserv = 1;
                        vc->vflags |= VCC_SWSL;
                        sc->swslist[sc->swsl_tail] = vci;
                        EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
                        sc->swsl_size++;
                        DBG(sc, INTR, ("added VCI %d to swslist", vci));
                }
        }
        return (need_softserv);
}
/*
 * Handle a receive DMA completion
 */
static void
en_rx_drain(struct en_softc *sc, u_int drq)
{
        struct en_rxslot *slot;
        struct en_vcc *vc;
        struct mbuf *m;
        struct atm_pseudohdr ah;

        slot = &sc->rxslot[EN_DQ_SLOT(drq)];

        m = NULL;       /* assume "JK" trash DMA */
        if (EN_DQ_LEN(drq) != 0) {
                _IF_DEQUEUE(&slot->indma, m);
                KASSERT(m != NULL, ("drqsync: %s: lost mbuf in slot %td!",
                    sc->ifp->if_xname, slot - sc->rxslot));
                uma_zfree(sc->map_zone, (struct en_map *)m->m_pkthdr.rcvif);
        }
        if ((vc = slot->vcc) == NULL) {
                /* oops */
                if (m != NULL)
                        m_freem(m);
                return;
        }

        /* do something with this mbuf */
        if (vc->vflags & VCC_DRAIN) {
                /* drain? */
                if (m != NULL)
                        m_freem(m);
                if (_IF_QLEN(&slot->indma) == 0 && _IF_QLEN(&slot->q) == 0 &&
                    (en_read(sc, MID_VC(vc->vcc.vci)) & MIDV_INSERVICE) == 0 &&
                    (vc->vflags & VCC_SWSL) == 0) {
                        vc->vflags &= ~VCC_CLOSE_RX;
                        if (vc->vcc.flags & ATMIO_FLAG_ASYNC)
                                en_close_finish(sc, vc);
                        else
                                cv_signal(&sc->cv_close);
                }
                return;
        }

        if (m != NULL) {
                ATM_PH_FLAGS(&ah) = vc->vcc.flags;
                ATM_PH_VPI(&ah) = 0;
                ATM_PH_SETVCI(&ah, vc->vcc.vci);

                DBG(sc, INTR, ("rx%td: rxvci%d: atm_input, mbuf %p, len %d, "
                    "hand %p", slot - sc->rxslot, vc->vcc.vci, m,
                    EN_DQ_LEN(drq), vc->rxhand));

                m->m_pkthdr.rcvif = sc->ifp;
                sc->ifp->if_ipackets++;

                vc->ipackets++;
                vc->ibytes += m->m_pkthdr.len;

#ifdef EN_DEBUG
                if (sc->debug & DBG_IPACKETS)
                        en_dump_packet(sc, m);
#endif
#ifdef ENABLE_BPF
                BPF_MTAP(sc->ifp, m);
#endif
                EN_UNLOCK(sc);
                atm_input(sc->ifp, &ah, m, vc->rxhand);
                EN_LOCK(sc);
        }
}
/*
 * check for RX DMA complete, and pass the data "upstairs"
 *
 * LOCK: locked, needed
 */
static int
en_intr_rx_dma(struct en_softc *sc)
{
        uint32_t val;
        uint32_t idx;
        uint32_t drq;

        val = en_read(sc, MID_DMA_RDRX);        /* chip's current location */
        idx = MID_DRQ_A2REG(sc->drq_chip);      /* where we last saw chip */

        while (idx != val) {
                sc->drq_free++;
                if ((drq = sc->drq[idx]) != 0) {
                        /* don't forget to zero it out when done */
                        sc->drq[idx] = 0;
                        en_rx_drain(sc, drq);
                }
                EN_WRAPADD(0, MID_DRQ_N, idx, 1);
        }
        sc->drq_chip = MID_DRQ_REG2A(val);      /* sync softc */

        if (sc->need_drqs) {
                /* true if we had a DRQ shortage */
                sc->need_drqs = 0;
                DBG(sc, INTR, ("cleared need DRQ condition"));
                return (1);
        } else
                return (0);
}
  1652. /*
  1653. * en_mget: get an mbuf chain that can hold totlen bytes and return it
  1654. * (for recv). For the actual allocation totlen is rounded up to a multiple
  1655. * of 4. We also ensure, that each mbuf has a multiple of 4 bytes.
  1656. *
  1657. * After this call the sum of all the m_len's in the chain will be totlen.
  1658. * This is called at interrupt time, so we can't wait here.
  1659. *
  1660. * LOCK: any, not needed
  1661. */
  1662. static struct mbuf *
  1663. en_mget(struct en_softc *sc, u_int pktlen)
  1664. {
  1665. struct mbuf *m, *tmp;
  1666. u_int totlen, pad;
  1667. totlen = roundup(pktlen, sizeof(uint32_t));
  1668. pad = totlen - pktlen;
  1669. /*
1670. * First get an mbuf with a packet header. Keep space for a couple of
1671. * words at the beginning.
  1672. */
  1673. /* called from interrupt context */
  1674. MGETHDR(m, M_DONTWAIT, MT_DATA);
  1675. if (m == NULL)
  1676. return (NULL);
  1677. m->m_pkthdr.rcvif = NULL;
  1678. m->m_pkthdr.len = pktlen;
  1679. m->m_len = EN_RX1BUF;
  1680. MH_ALIGN(m, EN_RX1BUF);
  1681. if (m->m_len >= totlen) {
  1682. m->m_len = totlen;
  1683. } else {
  1684. totlen -= m->m_len;
  1685. /* called from interrupt context */
  1686. tmp = m_getm(m, totlen, M_DONTWAIT, MT_DATA);
  1687. if (tmp == NULL) {
  1688. m_free(m);
  1689. return (NULL);
  1690. }
  1691. tmp = m->m_next;
  1692. /* m_getm could do this for us */
  1693. while (tmp != NULL) {
  1694. tmp->m_len = min(MCLBYTES, totlen);
  1695. totlen -= tmp->m_len;
  1696. tmp = tmp->m_next;
  1697. }
  1698. }
  1699. return (m);
  1700. }
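/*
 * Example: for pktlen = 61, en_mget() returns a chain whose m_len's
 * sum to roundup(61, 4) = 64; the caller (en_service) later trims the
 * 3 pad bytes off the last mbuf again.
 */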
  1701. /*
  1702. * Argument for RX DMAMAP loader.
  1703. */
  1704. struct rxarg {
  1705. struct en_softc *sc;
  1706. struct mbuf *m;
1707. u_int pre_skip; /* number of bytes to skip at the beginning */
1708. u_int post_skip; /* number of bytes to skip at the end */
  1709. struct en_vcc *vc; /* vc we are receiving on */
  1710. int wait; /* wait for DRQ entries */
  1711. };
  1712. /*
1713. * Copy the segment table to the buffer for later use and compute the
1714. * number of DMA queue entries we need.
  1715. *
  1716. * LOCK: locked, needed
  1717. */
  1718. static void
  1719. en_rxdma_load(void *uarg, bus_dma_segment_t *segs, int nseg,
  1720. bus_size_t mapsize, int error)
  1721. {
  1722. struct rxarg *rx = uarg;
  1723. struct en_softc *sc = rx->sc;
  1724. struct en_rxslot *slot = rx->vc->rxslot;
  1725. u_int free; /* number of free DRQ entries */
  1726. uint32_t cur; /* current buffer offset */
  1727. uint32_t drq; /* DRQ entry pointer */
  1728. uint32_t last_drq; /* where we have written last */
  1729. u_int needalign, cnt, count, bcode;
  1730. bus_addr_t addr;
  1731. bus_size_t rest;
  1732. int i;
  1733. if (error != 0)
  1734. return;
  1735. if (nseg > EN_MAX_DMASEG)
  1736. panic("too many DMA segments");
  1737. rx->wait = 0;
  1738. free = sc->drq_free;
  1739. drq = sc->drq_us;
  1740. cur = slot->cur;
  1741. last_drq = 0;
  1742. /*
  1743. * Local macro to add an entry to the receive DMA area. If there
  1744. * are no entries left, return. Save the byte offset of the entry
  1745. * in last_drq for later use.
  1746. */
  1747. #define PUT_DRQ_ENTRY(ENI, BCODE, COUNT, ADDR) \
  1748. if (free == 0) { \
  1749. EN_COUNT(sc->stats.rxdrqout); \
  1750. rx->wait = 1; \
  1751. return; \
  1752. } \
  1753. last_drq = drq; \
  1754. en_write(sc, drq + 0, (ENI || !sc->is_adaptec) ? \
  1755. MID_MK_RXQ_ENI(COUNT, rx->vc->vcc.vci, 0, BCODE) : \
  1756. MID_MK_RXQ_ADP(COUNT, rx->vc->vcc.vci, 0, BCODE)); \
  1757. en_write(sc, drq + 4, ADDR); \
  1758. \
  1759. EN_WRAPADD(MID_DRQOFF, MID_DRQEND, drq, 8); \
  1760. free--;
  1761. /*
  1762. * Local macro to generate a DMA entry to DMA cnt bytes. Updates
  1763. * the current buffer byte offset accordingly.
  1764. */
  1765. #define DO_DRQ(TYPE) do { \
  1766. rest -= cnt; \
  1767. EN_WRAPADD(slot->start, slot->stop, cur, cnt); \
  1768. DBG(sc, SERV, ("rx%td: "TYPE" %u bytes, %ju left, cur %#x", \
  1769. slot - sc->rxslot, cnt, (uintmax_t)rest, cur)); \
  1770. \
  1771. PUT_DRQ_ENTRY(1, bcode, count, addr); \
  1772. \
  1773. addr += cnt; \
  1774. } while (0)
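/*
 * Example of the three burst cases below (ENI card, bestburstlen =
 * 64): a 152-byte segment whose physical address is 16 bytes past a
 * burst boundary becomes three DRQ entries - a 48-byte "al_dma" to
 * reach alignment, one 64-byte "best_dma" burst and a 40-byte
 * "clean_dma" remainder.
 */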
  1775. /*
  1776. * Skip the RBD at the beginning
  1777. */
  1778. if (rx->pre_skip > 0) {
  1779. /* update DMA address */
  1780. EN_WRAPADD(slot->start, slot->stop, cur, rx->pre_skip);
  1781. PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
  1782. }
  1783. for (i = 0; i < nseg; i++, segs++) {
  1784. addr = segs->ds_addr;
  1785. rest = segs->ds_len;
  1786. if (sc->is_adaptec) {
  1787. /* adaptec card - simple */
  1788. /* advance the on-card buffer pointer */
  1789. EN_WRAPADD(slot->start, slot->stop, cur, rest);
  1790. DBG(sc, SERV, ("rx%td: adp %ju bytes %#jx "
  1791. "(cur now 0x%x)", slot - sc->rxslot,
  1792. (uintmax_t)rest, (uintmax_t)addr, cur));
  1793. PUT_DRQ_ENTRY(0, 0, rest, addr);
  1794. continue;
  1795. }
  1796. /*
  1797. * do we need to do a DMA op to align to the maximum
1798. * burst? Note that we are always 32-bit aligned.
  1799. */
  1800. if (sc->alburst &&
  1801. (needalign = (addr & sc->bestburstmask)) != 0) {
  1802. /* compute number of bytes, words and code */
  1803. cnt = sc->bestburstlen - needalign;
  1804. if (cnt > rest)
  1805. cnt = rest;
  1806. count = cnt / sizeof(uint32_t);
  1807. if (sc->noalbursts) {
  1808. bcode = MIDDMA_WORD;
  1809. } else {
  1810. bcode = en_dmaplan[count].bcode;
  1811. count = cnt >> en_dmaplan[count].divshift;
  1812. }
  1813. DO_DRQ("al_dma");
  1814. }
  1815. /* do we need to do a max-sized burst? */
  1816. if (rest >= sc->bestburstlen) {
  1817. count = rest >> sc->bestburstshift;
  1818. cnt = count << sc->bestburstshift;
  1819. bcode = sc->bestburstcode;
  1820. DO_DRQ("best_dma");
  1821. }
  1822. /* do we need to do a cleanup burst? */
  1823. if (rest != 0) {
  1824. cnt = rest;
  1825. count = rest / sizeof(uint32_t);
  1826. if (sc->noalbursts) {
  1827. bcode = MIDDMA_WORD;
  1828. } else {
  1829. bcode = en_dmaplan[count].bcode;
  1830. count = cnt >> en_dmaplan[count].divshift;
  1831. }
  1832. DO_DRQ("clean_dma");
  1833. }
  1834. }
  1835. /*
  1836. * Skip stuff at the end
  1837. */
  1838. if (rx->post_skip > 0) {
  1839. /* update DMA address */
  1840. EN_WRAPADD(slot->start, slot->stop, cur, rx->post_skip);
  1841. PUT_DRQ_ENTRY(0, MIDDMA_JK, WORD_IDX(slot->start, cur), 0);
  1842. }
  1843. /* record the end for the interrupt routine */
  1844. sc->drq[MID_DRQ_A2REG(last_drq)] =
  1845. EN_DQ_MK(slot - sc->rxslot, rx->m->m_pkthdr.len);
  1846. /* set the end flag in the last descriptor */
  1847. en_write(sc, last_drq + 0, SETQ_END(sc, en_read(sc, last_drq + 0)));
  1848. #undef PUT_DRQ_ENTRY
  1849. #undef DO_DRQ
  1850. /* commit */
  1851. slot->cur = cur;
  1852. sc->drq_free = free;
  1853. sc->drq_us = drq;
  1854. /* signal to card */
  1855. en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
  1856. }
  1857. /*
  1858. * en_service: handle a service interrupt
  1859. *
  1860. * Q: why do we need a software service list?
  1861. *
  1862. * A: if we remove a VCI from the hardware list and we find that we are
  1863. * out of DRQs we must defer processing until some DRQs become free.
  1864. * so we must remember to look at this RX VCI/slot later, but we can't
  1865. * put it back on the hardware service list (since that isn't allowed).
  1866. * so we instead save it on the software service list. it would be nice
  1867. * if we could peek at the VCI on top of the hwservice list without removing
1868. * it; however, this leads to a race condition: if we peek at it and
1869. * decide we are done with it, new data could come in before we have a
  1870. * chance to remove it from the hwslist. by the time we get it out of
  1871. * the list the interrupt for the new data will be lost. oops!
  1872. *
  1873. * LOCK: locked, needed
  1874. */
  1875. static void
  1876. en_service(struct en_softc *sc)
  1877. {
  1878. struct mbuf *m, *lastm;
  1879. struct en_map *map;
  1880. struct rxarg rx;
  1881. uint32_t cur;
  1882. uint32_t dstart; /* data start (as reported by card) */
  1883. uint32_t rbd; /* receive buffer descriptor */
  1884. uint32_t pdu; /* AAL5 trailer */
  1885. int mlen;
  1886. int error;
  1887. struct en_rxslot *slot;
  1888. struct en_vcc *vc;
  1889. rx.sc = sc;
  1890. next_vci:
  1891. if (sc->swsl_size == 0) {
  1892. DBG(sc, SERV, ("en_service done"));
  1893. return;
  1894. }
  1895. /*
  1896. * get vcc to service
  1897. */
  1898. rx.vc = vc = sc->vccs[sc->swslist[sc->swsl_head]];
  1899. slot = vc->rxslot;
1900. KASSERT(slot->vcc->rxslot == slot, ("en_service: rx slot/vci sync"));
  1901. /*
  1902. * determine our mode and if we've got any work to do
  1903. */
  1904. DBG(sc, SERV, ("rx%td: service vci=%d start/stop/cur=0x%x 0x%x "
  1905. "0x%x", slot - sc->rxslot, vc->vcc.vci, slot->start,
  1906. slot->stop, slot->cur));
  1907. same_vci:
  1908. cur = slot->cur;
  1909. dstart = MIDV_DSTART(en_read(sc, MID_DST_RP(vc->vcc.vci)));
  1910. dstart = (dstart * sizeof(uint32_t)) + slot->start;
  1911. /* check to see if there is any data at all */
  1912. if (dstart == cur) {
  1913. EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1);
  1914. /* remove from swslist */
  1915. vc->vflags &= ~VCC_SWSL;
  1916. sc->swsl_size--;
  1917. DBG(sc, SERV, ("rx%td: remove vci %d from swslist",
  1918. slot - sc->rxslot, vc->vcc.vci));
  1919. goto next_vci;
  1920. }
  1921. /*
  1922. * figure out how many bytes we need
  1923. * [mlen = # bytes to go in mbufs]
  1924. */
  1925. rbd = en_read(sc, cur);
  1926. if (MID_RBD_ID(rbd) != MID_RBD_STDID)
  1927. panic("en_service: id mismatch");
  1928. if (rbd & MID_RBD_T) {
  1929. mlen = 0; /* we've got trash */
  1930. rx.pre_skip = MID_RBD_SIZE;
  1931. rx.post_skip = 0;
  1932. EN_COUNT(sc->stats.ttrash);
  1933. DBG(sc, SERV, ("RX overflow lost %d cells!", MID_RBD_CNT(rbd)));
  1934. } else if (vc->vcc.aal != ATMIO_AAL_5) {
  1935. /* 1 cell (ick!) */
  1936. mlen = MID_CHDR_SIZE + MID_ATMDATASZ;
  1937. rx.pre_skip = MID_RBD_SIZE;
  1938. rx.post_skip = 0;
  1939. } else {
  1940. rx.pre_skip = MID_RBD_SIZE;
  1941. /* get PDU trailer in correct byte order */
  1942. pdu = cur + MID_RBD_CNT(rbd) * MID_ATMDATASZ +
  1943. MID_RBD_SIZE - MID_PDU_SIZE;
  1944. if (pdu >= slot->stop)
  1945. pdu -= EN_RXSZ * 1024;
  1946. pdu = en_read(sc, pdu);
  1947. if (MID_RBD_CNT(rbd) * MID_ATMDATASZ <
  1948. MID_PDU_LEN(pdu)) {
  1949. device_printf(sc->dev, "invalid AAL5 length\n");
  1950. rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
  1951. mlen = 0;
  1952. sc->ifp->if_ierrors++;
  1953. } else if (rbd & MID_RBD_CRCERR) {
  1954. device_printf(sc->dev, "CRC error\n");
  1955. rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ;
  1956. mlen = 0;
  1957. sc->ifp->if_ierrors++;
  1958. } else {
  1959. mlen = MID_PDU_LEN(pdu);
  1960. rx.post_skip = MID_RBD_CNT(rbd) * MID_ATMDATASZ - mlen;
  1961. }
  1962. }
  1963. /*
1964. * now allocate mbufs for mlen bytes of data; if we are out of mbufs, trash it all
  1965. *
  1966. * notes:
  1967. * 1. it is possible that we've already allocated an mbuf for this pkt
  1968. * but ran out of DRQs, in which case we saved the allocated mbuf
  1969. * on "q".
1970. * 2. if we save a buf in "q" we store the "cur" (pointer) in the
  1971. * buf as an identity (that we can check later).
  1972. * 3. after this block of code, if m is still NULL then we ran out of
  1973. * mbufs
  1974. */
  1975. _IF_DEQUEUE(&slot->q, m);
  1976. if (m != NULL) {
  1977. if (m->m_pkthdr.csum_data != cur) {
  1978. /* wasn't ours */
  1979. DBG(sc, SERV, ("rx%td: q'ed buf %p not ours",
  1980. slot - sc->rxslot, m));
  1981. _IF_PREPEND(&slot->q, m);
  1982. m = NULL;
  1983. EN_COUNT(sc->stats.rxqnotus);
  1984. } else {
  1985. EN_COUNT(sc->stats.rxqus);
  1986. DBG(sc, SERV, ("rx%td: recovered q'ed buf %p",
  1987. slot - sc->rxslot, m));
  1988. }
  1989. }
  1990. if (mlen == 0 && m != NULL) {
  1991. /* should not happen */
  1992. m_freem(m);
  1993. m = NULL;
  1994. }
  1995. if (mlen != 0 && m == NULL) {
  1996. m = en_mget(sc, mlen);
  1997. if (m == NULL) {
  1998. rx.post_skip += mlen;
  1999. mlen = 0;
  2000. EN_COUNT(sc->stats.rxmbufout);
  2001. DBG(sc, SERV, ("rx%td: out of mbufs",
  2002. slot - sc->rxslot));
  2003. } else
  2004. rx.post_skip -= roundup(mlen, sizeof(uint32_t)) - mlen;
  2005. DBG(sc, SERV, ("rx%td: allocate buf %p, mlen=%d",
  2006. slot - sc->rxslot, m, mlen));
  2007. }
  2008. DBG(sc, SERV, ("rx%td: VCI %d, rbuf %p, mlen %d, skip %u/%u",
  2009. slot - sc->rxslot, vc->vcc.vci, m, mlen, rx.pre_skip,
  2010. rx.post_skip));
  2011. if (m != NULL) {
  2012. /* M_NOWAIT - called from interrupt context */
  2013. map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
  2014. if (map == NULL) {
  2015. rx.post_skip += mlen;
  2016. m_freem(m);
  2017. DBG(sc, SERV, ("rx%td: out of maps",
  2018. slot - sc->rxslot));
  2019. goto skip;
  2020. }
  2021. rx.m = m;
  2022. error = bus_dmamap_load_mbuf(sc->txtag, map->map, m,
  2023. en_rxdma_load, &rx, BUS_DMA_NOWAIT);
  2024. if (error != 0) {
  2025. device_printf(sc->dev, "loading RX map failed "
  2026. "%d\n", error);
  2027. uma_zfree(sc->map_zone, map);
  2028. m_freem(m);
  2029. rx.post_skip += mlen;
  2030. goto skip;
  2031. }
  2032. map->flags |= ENMAP_LOADED;
  2033. if (rx.wait) {
  2034. /* out of DRQs - wait */
  2035. uma_zfree(sc->map_zone, map);
  2036. m->m_pkthdr.csum_data = cur;
  2037. _IF_ENQUEUE(&slot->q, m);
  2038. EN_COUNT(sc->stats.rxdrqout);
  2039. sc->need_drqs = 1; /* flag condition */
  2040. return;
  2041. }
  2042. (void)m_length(m, &lastm);
  2043. lastm->m_len -= roundup(mlen, sizeof(uint32_t)) - mlen;
  2044. m->m_pkthdr.rcvif = (void *)map;
  2045. _IF_ENQUEUE(&slot->indma, m);
  2046. /* get next packet in this slot */
  2047. goto same_vci;
  2048. }
  2049. skip:
  2050. /*
2051. * We end up here if we should drop the packet from the receive buffer.
2052. * The number of bytes to drop ends up in rx.post_skip. We can do this with
2053. * one JK entry. If we don't even have that one, wait.
  2054. */
  2055. if (sc->drq_free == 0) {
  2056. sc->need_drqs = 1; /* flag condition */
  2057. return;
  2058. }
  2059. rx.post_skip += rx.pre_skip;
  2060. DBG(sc, SERV, ("rx%td: skipping %u", slot - sc->rxslot, rx.post_skip));
  2061. /* advance buffer address */
  2062. EN_WRAPADD(slot->start, slot->stop, cur, rx.post_skip);
  2063. /* write DRQ entry */
  2064. if (sc->is_adaptec)
  2065. en_write(sc, sc->drq_us,
  2066. MID_MK_RXQ_ADP(WORD_IDX(slot->start, cur),
  2067. vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
  2068. else
  2069. en_write(sc, sc->drq_us,
  2070. MID_MK_RXQ_ENI(WORD_IDX(slot->start, cur),
  2071. vc->vcc.vci, MID_DMA_END, MIDDMA_JK));
  2072. en_write(sc, sc->drq_us + 4, 0);
  2073. EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_us, 8);
  2074. sc->drq_free--;
  2075. /* signal to RX interrupt */
  2076. sc->drq[MID_DRQ_A2REG(sc->drq_us)] = EN_DQ_MK(slot - sc->rxslot, 0);
  2077. slot->cur = cur;
  2078. /* signal to card */
  2079. en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_us));
  2080. goto same_vci;
  2081. }
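/*
 * AAL5 arithmetic example for the trailer handling above: an RBD with
 * MID_RBD_CNT(rbd) = 2 carries 2 * MID_ATMDATASZ = 96 payload bytes.
 * If the PDU trailer says MID_PDU_LEN(pdu) = 88, then mlen = 88 and
 * rx.post_skip = 96 - 88 = 8 (the AAL5 pad and trailer); the RBD
 * itself is covered by rx.pre_skip = MID_RBD_SIZE.
 */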
  2082. /*
  2083. * interrupt handler
  2084. *
  2085. * LOCK: unlocked, needed
  2086. */
  2087. void
  2088. en_intr(void *arg)
  2089. {
  2090. struct en_softc *sc = arg;
  2091. uint32_t reg, kick, mask;
  2092. int lcv, need_softserv;
  2093. EN_LOCK(sc);
  2094. reg = en_read(sc, MID_INTACK);
  2095. DBG(sc, INTR, ("interrupt=0x%b", reg, MID_INTBITS));
  2096. if ((reg & MID_INT_ANY) == 0) {
  2097. EN_UNLOCK(sc);
  2098. return;
  2099. }
  2100. /*
  2101. * unexpected errors that need a reset
  2102. */
  2103. if ((reg & (MID_INT_IDENT | MID_INT_LERR | MID_INT_DMA_ERR)) != 0) {
  2104. device_printf(sc->dev, "unexpected interrupt=0x%b, "
  2105. "resetting\n", reg, MID_INTBITS);
  2106. #ifdef EN_DEBUG
  2107. panic("en: unexpected error");
  2108. #else
  2109. en_reset_ul(sc);
  2110. en_init(sc);
  2111. #endif
  2112. EN_UNLOCK(sc);
  2113. return;
  2114. }
  2115. if (reg & MID_INT_SUNI)
  2116. utopia_intr(&sc->utopia);
  2117. kick = 0;
  2118. if (reg & MID_INT_TX)
  2119. kick |= en_intr_tx(sc, reg);
  2120. if (reg & MID_INT_DMA_TX)
  2121. kick |= en_intr_tx_dma(sc);
  2122. /*
  2123. * kick xmit channels as needed.
  2124. */
  2125. if (kick) {
  2126. DBG(sc, INTR, ("tx kick mask = 0x%x", kick));
  2127. for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2)
  2128. if ((kick & mask) && _IF_QLEN(&sc->txslot[lcv].q) != 0)
  2129. en_txdma(sc, &sc->txslot[lcv]);
  2130. }
  2131. need_softserv = 0;
  2132. if (reg & MID_INT_DMA_RX)
  2133. need_softserv |= en_intr_rx_dma(sc);
  2134. if (reg & MID_INT_SERVICE)
  2135. need_softserv |= en_intr_service(sc);
  2136. if (need_softserv)
  2137. en_service(sc);
  2138. /*
  2139. * keep our stats
  2140. */
  2141. if (reg & MID_INT_DMA_OVR) {
  2142. EN_COUNT(sc->stats.dmaovr);
  2143. DBG(sc, INTR, ("MID_INT_DMA_OVR"));
  2144. }
  2145. reg = en_read(sc, MID_STAT);
  2146. sc->stats.otrash += MID_OTRASH(reg);
  2147. sc->stats.vtrash += MID_VTRASH(reg);
  2148. EN_UNLOCK(sc);
  2149. }
  2150. /*
  2151. * Read at most n SUNI regs starting at reg into val
  2152. */
  2153. static int
  2154. en_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n)
  2155. {
  2156. struct en_softc *sc = ifatm->ifp->if_softc;
  2157. u_int i;
  2158. EN_CHECKLOCK(sc);
  2159. if (reg >= MID_NSUNI)
  2160. return (EINVAL);
  2161. if (reg + *n > MID_NSUNI)
  2162. *n = MID_NSUNI - reg;
  2163. for (i = 0; i < *n; i++)
  2164. val[i] = en_read(sc, MID_SUNIOFF + 4 * (reg + i));
  2165. return (0);
  2166. }
  2167. /*
2168. * change the bits given by mask in register reg to the corresponding bits of val
  2169. */
  2170. static int
  2171. en_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
  2172. {
  2173. struct en_softc *sc = ifatm->ifp->if_softc;
  2174. uint32_t regval;
  2175. EN_CHECKLOCK(sc);
  2176. if (reg >= MID_NSUNI)
  2177. return (EINVAL);
  2178. regval = en_read(sc, MID_SUNIOFF + 4 * reg);
  2179. regval = (regval & ~mask) | (val & mask);
  2180. en_write(sc, MID_SUNIOFF + 4 * reg, regval);
  2181. return (0);
  2182. }
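/*
 * Example: en_utopia_writereg(ifatm, reg, 0x0f, 0x05) reads the SUNI
 * register, replaces the low nibble with 5 and leaves the upper bits
 * untouched - a plain read-modify-write.
 */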
  2183. static const struct utopia_methods en_utopia_methods = {
  2184. en_utopia_readregs,
  2185. en_utopia_writereg
  2186. };
  2187. /*********************************************************************/
  2188. /*
2189. * Probing the DMA brokenness of the card
  2190. */
  2191. /*
  2192. * Physical address load helper function for DMA probe
  2193. *
  2194. * LOCK: unlocked, not needed
  2195. */
  2196. static void
  2197. en_dmaprobe_load(void *uarg, bus_dma_segment_t *segs, int nseg, int error)
  2198. {
  2199. if (error == 0)
  2200. *(bus_addr_t *)uarg = segs[0].ds_addr;
  2201. }
  2202. /*
  2203. * en_dmaprobe: helper function for en_attach.
  2204. *
  2205. * see how the card handles DMA by running a few DMA tests. we need
  2206. * to figure out the largest number of bytes we can DMA in one burst
  2207. * ("bestburstlen"), and if the starting address for a burst needs to
  2208. * be aligned on any sort of boundary or not ("alburst").
  2209. *
2210. * Things turn out to be more complex than that, because on my (harti) brand
2211. * new motherboard (2.4GHz) we can do 64-byte aligned DMAs, but everything
2212. * larger than 4 bytes fails (with an RX DMA timeout) for physical
2213. * addresses that end in 0xc. Therefore we search not only for the largest
2214. * supported burst size (hopefully 64), but also check the largest supported
2215. * unaligned size. If that turns out to be less than 4 words, we set the
2216. * noalbursts flag. noalbursts is only ever set when alburst is also set.
  2217. */
  2218. /*
  2219. * en_dmaprobe_doit: do actual testing for the DMA test.
2220. * Cycle through all burst sizes from 8 up to 64 and test whether each works.
  2221. * Return the largest one that works.
  2222. *
  2223. * LOCK: unlocked, not needed
  2224. */
  2225. static int
  2226. en_dmaprobe_doit(struct en_softc *sc, uint8_t *sp, bus_addr_t psp)
  2227. {
  2228. uint8_t *dp = sp + MIDDMA_MAXBURST;
  2229. bus_addr_t pdp = psp + MIDDMA_MAXBURST;
  2230. int lcv, retval = 4, cnt;
  2231. uint32_t reg, bcode, midvloc;
  2232. if (sc->en_busreset)
  2233. sc->en_busreset(sc);
  2234. en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
  2235. /*
  2236. * set up a 1k buffer at MID_BUFOFF
  2237. */
  2238. midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(uint32_t))
  2239. >> MIDV_LOCTOPSHFT;
  2240. en_write(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
  2241. en_write(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT)
  2242. | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
  2243. en_write(sc, MID_DST_RP(0), 0);
  2244. en_write(sc, MID_WP_ST_CNT(0), 0);
  2245. /* set up sample data */
  2246. for (lcv = 0 ; lcv < MIDDMA_MAXBURST; lcv++)
  2247. sp[lcv] = lcv + 1;
  2248. /* enable DMA (only) */
  2249. en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
  2250. sc->drq_chip = MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX));
  2251. sc->dtq_chip = MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX));
  2252. /*
  2253. * try it now . . . DMA it out, then DMA it back in and compare
  2254. *
  2255. * note: in order to get the dma stuff to reverse directions it wants
  2256. * the "end" flag set! since we are not dma'ing valid data we may
  2257. * get an ident mismatch interrupt (which we will ignore).
  2258. */
  2259. DBG(sc, DMA, ("test sp=%p/%#lx, dp=%p/%#lx",
  2260. sp, (u_long)psp, dp, (u_long)pdp));
  2261. for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
  2262. DBG(sc, DMA, ("test lcv=%d", lcv));
  2263. /* zero SRAM and dest buffer */
  2264. bus_space_set_region_4(sc->en_memt, sc->en_base,
  2265. MID_BUFOFF, 0, 1024 / 4);
  2266. bzero(dp, MIDDMA_MAXBURST);
  2267. bcode = en_sz2b(lcv);
2268. /* build a single lcv-byte DMA */
  2269. if (sc->is_adaptec)
  2270. en_write(sc, sc->dtq_chip,
  2271. MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
  2272. else
  2273. en_write(sc, sc->dtq_chip,
  2274. MID_MK_TXQ_ENI(1, 0, MID_DMA_END, bcode));
  2275. en_write(sc, sc->dtq_chip + 4, psp);
  2276. EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
  2277. en_write(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
  2278. cnt = 1000;
  2279. while ((reg = en_readx(sc, MID_DMA_RDTX)) !=
  2280. MID_DTQ_A2REG(sc->dtq_chip)) {
  2281. DELAY(1);
  2282. if (--cnt == 0) {
  2283. DBG(sc, DMA, ("unexpected timeout in tx "
  2284. "DMA test\n alignment=0x%lx, burst size=%d"
  2285. ", dma addr reg=%#x, rdtx=%#x, stat=%#x\n",
  2286. (u_long)sp & 63, lcv,
  2287. en_read(sc, MID_DMA_ADDR), reg,
  2288. en_read(sc, MID_INTSTAT)));
  2289. return (retval);
  2290. }
  2291. }
  2292. reg = en_read(sc, MID_INTACK);
  2293. if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
  2294. DBG(sc, DMA, ("unexpected status in tx DMA test: %#x\n",
  2295. reg));
  2296. return (retval);
  2297. }
  2298. /* re-enable DMA (only) */
  2299. en_write(sc, MID_MAST_CSR, MID_MCSR_ENDMA);
  2300. /* "return to sender..." address is known ... */
2301. /* build a single lcv-byte DMA */
  2302. if (sc->is_adaptec)
  2303. en_write(sc, sc->drq_chip,
  2304. MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
  2305. else
  2306. en_write(sc, sc->drq_chip,
  2307. MID_MK_RXQ_ENI(1, 0, MID_DMA_END, bcode));
  2308. en_write(sc, sc->drq_chip + 4, pdp);
  2309. EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
  2310. en_write(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
  2311. cnt = 1000;
  2312. while ((reg = en_readx(sc, MID_DMA_RDRX)) !=
  2313. MID_DRQ_A2REG(sc->drq_chip)) {
  2314. DELAY(1);
2316. if (--cnt == 0) {
  2317. DBG(sc, DMA, ("unexpected timeout in rx "
  2318. "DMA test, rdrx=%#x\n", reg));
  2319. return (retval);
  2320. }
  2321. }
  2322. reg = en_read(sc, MID_INTACK);
  2323. if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
  2324. DBG(sc, DMA, ("unexpected status in rx DMA "
  2325. "test: 0x%x\n", reg));
  2326. return (retval);
  2327. }
  2328. if (bcmp(sp, dp, lcv)) {
  2329. DBG(sc, DMA, ("DMA test failed! lcv=%d, sp=%p, "
  2330. "dp=%p", lcv, sp, dp));
  2331. return (retval);
  2332. }
  2333. retval = lcv;
  2334. }
  2335. return (retval); /* studly 64 byte DMA present! oh baby!! */
  2336. }
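/*
 * Note that retval starts out as 4: plain word (4-byte) DMA is assumed
 * to always work, and every burst size that survives the out-and-back
 * compare bumps it up. So a card that passes the 8- and 16-byte tests
 * but times out at 32 yields 16, and a card that passes everything
 * yields MIDDMA_MAXBURST (64).
 */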
  2337. /*
  2338. * Find the best DMA parameters
  2339. *
  2340. * LOCK: unlocked, not needed
  2341. */
  2342. static void
  2343. en_dmaprobe(struct en_softc *sc)
  2344. {
  2345. bus_dma_tag_t tag;
  2346. bus_dmamap_t map;
  2347. int err;
  2348. void *buffer;
  2349. int bestalgn, lcv, try, bestnoalgn;
  2350. bus_addr_t phys;
  2351. uint8_t *addr;
  2352. sc->alburst = 0;
  2353. sc->noalbursts = 0;
  2354. /*
  2355. * Allocate some DMA-able memory.
  2356. * We need 3 times the max burst size aligned to the max burst size.
  2357. */
  2358. err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), MIDDMA_MAXBURST, 0,
  2359. BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  2360. 3 * MIDDMA_MAXBURST, 1, 3 * MIDDMA_MAXBURST, 0,
  2361. NULL, NULL, &tag);
  2362. if (err)
  2363. panic("%s: cannot create test DMA tag %d", __func__, err);
  2364. err = bus_dmamem_alloc(tag, &buffer, 0, &map);
  2365. if (err)
  2366. panic("%s: cannot allocate test DMA memory %d", __func__, err);
  2367. err = bus_dmamap_load(tag, map, buffer, 3 * MIDDMA_MAXBURST,
  2368. en_dmaprobe_load, &phys, BUS_DMA_NOWAIT);
  2369. if (err)
  2370. panic("%s: cannot load test DMA map %d", __func__, err);
  2371. addr = buffer;
  2372. DBG(sc, DMA, ("phys=%#lx addr=%p", (u_long)phys, addr));
  2373. /*
  2374. * Now get the best burst size of the aligned case.
  2375. */
  2376. bestalgn = bestnoalgn = en_dmaprobe_doit(sc, addr, phys);
  2377. /*
  2378. * Now try unaligned.
  2379. */
  2380. for (lcv = 4; lcv < MIDDMA_MAXBURST; lcv += 4) {
  2381. try = en_dmaprobe_doit(sc, addr + lcv, phys + lcv);
  2382. if (try < bestnoalgn)
  2383. bestnoalgn = try;
  2384. }
  2385. if (bestnoalgn < bestalgn) {
  2386. sc->alburst = 1;
  2387. if (bestnoalgn < 32)
  2388. sc->noalbursts = 1;
  2389. }
  2390. sc->bestburstlen = bestalgn;
  2391. sc->bestburstshift = en_log2(bestalgn);
  2392. sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
  2393. sc->bestburstcode = en_sz2b(bestalgn);
  2394. /*
  2395. * Reset the chip before freeing the buffer. It may still be trying
  2396. * to DMA.
  2397. */
  2398. if (sc->en_busreset)
  2399. sc->en_busreset(sc);
  2400. en_write(sc, MID_RESID, 0x0); /* reset card before touching RAM */
  2401. DELAY(10000); /* may still do DMA */
  2402. /*
  2403. * Free the DMA stuff
  2404. */
  2405. bus_dmamap_unload(tag, map);
  2406. bus_dmamem_free(tag, buffer, map);
  2407. bus_dma_tag_destroy(tag);
  2408. }
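/*
 * Worked example: if the aligned probe above yields 64 but some
 * unaligned start only survives 16-byte bursts, we end up with
 * alburst = 1 and, because 16 < 32, noalbursts = 1. The bestburst*
 * fields then describe the aligned case: bestburstlen = 64,
 * bestburstshift = 6, bestburstmask = 0x3f and bestburstcode =
 * en_sz2b(64).
 */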
  2409. /*********************************************************************/
  2410. /*
  2411. * Attach/detach.
  2412. */
  2413. /*
  2414. * Attach to the card.
  2415. *
  2416. * LOCK: unlocked, not needed (but initialized)
  2417. */
  2418. int
  2419. en_attach(struct en_softc *sc)
  2420. {
  2421. struct ifnet *ifp = sc->ifp;
  2422. int sz;
  2423. uint32_t reg, lcv, check, ptr, sav, midvloc;
  2424. #ifdef EN_DEBUG
  2425. sc->debug = EN_DEBUG;
  2426. #endif
  2427. /*
  2428. * Probe card to determine memory size.
  2429. *
  2430. * The stupid ENI card always reports to PCI that it needs 4MB of
  2431. * space (2MB regs and 2MB RAM). If it has less than 2MB RAM the
  2432. * addresses wrap in the RAM address space (i.e. on a 512KB card
  2433. * addresses 0x3ffffc, 0x37fffc, and 0x2ffffc are aliases for
  2434. * 0x27fffc [note that RAM starts at offset 0x200000]).
  2435. */
  2436. /* reset card before touching RAM */
  2437. if (sc->en_busreset)
  2438. sc->en_busreset(sc);
  2439. en_write(sc, MID_RESID, 0x0);
  2440. for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
  2441. en_write(sc, lcv, lcv); /* data[address] = address */
2442. for (check = MID_PROBEOFF; check < lcv; check += MID_PROBSIZE) {
  2443. reg = en_read(sc, check);
  2444. if (reg != check)
  2445. /* found an alias! - quit */
  2446. goto done_probe;
  2447. }
  2448. }
  2449. done_probe:
  2450. lcv -= MID_PROBSIZE; /* take one step back */
  2451. sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;
  2452. /*
  2453. * determine the largest DMA burst supported
  2454. */
  2455. en_dmaprobe(sc);
  2456. /*
  2457. * "hello world"
  2458. */
  2459. /* reset */
  2460. if (sc->en_busreset)
  2461. sc->en_busreset(sc);
  2462. en_write(sc, MID_RESID, 0x0); /* reset */
  2463. /* zero memory */
  2464. bus_space_set_region_4(sc->en_memt, sc->en_base,
  2465. MID_RAMOFF, 0, sc->en_obmemsz / 4);
  2466. reg = en_read(sc, MID_RESID);
  2467. device_printf(sc->dev, "ATM midway v%d, board IDs %d.%d, %s%s%s, "
  2468. "%ldKB on-board RAM\n", MID_VER(reg), MID_MID(reg), MID_DID(reg),
  2469. (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
  2470. (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
  2471. (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
  2472. (long)sc->en_obmemsz / 1024);
  2473. /*
  2474. * fill in common ATM interface stuff
  2475. */
  2476. IFP2IFATM(sc->ifp)->mib.hw_version = (MID_VER(reg) << 16) |
  2477. (MID_MID(reg) << 8) | MID_DID(reg);
  2478. if (MID_DID(reg) & 0x4)
  2479. IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_UTP_155;
  2480. else
  2481. IFP2IFATM(sc->ifp)->mib.media = IFM_ATM_MM_155;
  2482. IFP2IFATM(sc->ifp)->mib.pcr = ATM_RATE_155M;
  2483. IFP2IFATM(sc->ifp)->mib.vpi_bits = 0;
  2484. IFP2IFATM(sc->ifp)->mib.vci_bits = MID_VCI_BITS;
  2485. IFP2IFATM(sc->ifp)->mib.max_vccs = MID_N_VC;
  2486. IFP2IFATM(sc->ifp)->mib.max_vpcs = 0;
  2487. if (sc->is_adaptec) {
  2488. IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ADP155P;
  2489. if (sc->bestburstlen == 64 && sc->alburst == 0)
  2490. device_printf(sc->dev,
  2491. "passed 64 byte DMA test\n");
  2492. else
  2493. device_printf(sc->dev, "FAILED DMA TEST: "
  2494. "burst=%d, alburst=%d\n", sc->bestburstlen,
  2495. sc->alburst);
  2496. } else {
  2497. IFP2IFATM(sc->ifp)->mib.device = ATM_DEVICE_ENI155P;
  2498. device_printf(sc->dev, "maximum DMA burst length = %d "
  2499. "bytes%s\n", sc->bestburstlen, sc->alburst ?
  2500. sc->noalbursts ? " (no large bursts)" : " (must align)" :
  2501. "");
  2502. }
  2503. /*
  2504. * link into network subsystem and prepare card
  2505. */
  2506. sc->ifp->if_softc = sc;
  2507. ifp->if_flags = IFF_SIMPLEX;
  2508. ifp->if_ioctl = en_ioctl;
  2509. ifp->if_start = en_start;
  2510. mtx_init(&sc->en_mtx, device_get_nameunit(sc->dev),
  2511. MTX_NETWORK_LOCK, MTX_DEF);
  2512. cv_init(&sc->cv_close, "VC close");
  2513. /*
  2514. * Make the sysctl tree
  2515. */
  2516. sysctl_ctx_init(&sc->sysctl_ctx);
  2517. if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
  2518. SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO,
  2519. device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "")) == NULL)
  2520. goto fail;
  2521. if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
  2522. OID_AUTO, "istats", CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0,
  2523. en_sysctl_istats, "S", "internal statistics") == NULL)
  2524. goto fail;
  2525. #ifdef EN_DEBUG
  2526. if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
  2527. OID_AUTO, "debug", CTLFLAG_RW , &sc->debug, 0, "") == NULL)
  2528. goto fail;
  2529. #endif
  2530. IFP2IFATM(sc->ifp)->phy = &sc->utopia;
  2531. utopia_attach(&sc->utopia, IFP2IFATM(sc->ifp), &sc->media, &sc->en_mtx,
  2532. &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
  2533. &en_utopia_methods);
  2534. utopia_init_media(&sc->utopia);
  2535. MGET(sc->padbuf, M_WAIT, MT_DATA);
  2536. bzero(sc->padbuf->m_data, MLEN);
  2537. if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
  2538. BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  2539. EN_TXSZ * 1024, EN_MAX_DMASEG, EN_TXSZ * 1024, 0,
  2540. NULL, NULL, &sc->txtag))
  2541. goto fail;
  2542. sc->map_zone = uma_zcreate("en dma maps", sizeof(struct en_map),
  2543. en_map_ctor, en_map_dtor, NULL, en_map_fini, UMA_ALIGN_PTR,
  2544. UMA_ZONE_ZINIT);
  2545. if (sc->map_zone == NULL)
  2546. goto fail;
  2547. uma_zone_set_max(sc->map_zone, EN_MAX_MAPS);
  2548. /*
  2549. * init softc
  2550. */
  2551. sc->vccs = malloc(MID_N_VC * sizeof(sc->vccs[0]),
  2552. M_DEVBUF, M_ZERO | M_WAITOK);
  2553. sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
  2554. ptr = sav = MID_BUFOFF;
  2555. ptr = roundup(ptr, EN_TXSZ * 1024); /* align */
  2556. sz = sz - (ptr - sav);
  2557. if (EN_TXSZ*1024 * EN_NTX > sz) {
  2558. device_printf(sc->dev, "EN_NTX/EN_TXSZ too big\n");
  2559. goto fail;
  2560. }
2561. for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
  2562. sc->txslot[lcv].mbsize = 0;
  2563. sc->txslot[lcv].start = ptr;
  2564. ptr += (EN_TXSZ * 1024);
  2565. sz -= (EN_TXSZ * 1024);
  2566. sc->txslot[lcv].stop = ptr;
  2567. sc->txslot[lcv].nref = 0;
  2568. DBG(sc, INIT, ("tx%d: start 0x%x, stop 0x%x", lcv,
  2569. sc->txslot[lcv].start, sc->txslot[lcv].stop));
  2570. }
  2571. sav = ptr;
  2572. ptr = roundup(ptr, EN_RXSZ * 1024); /* align */
  2573. sz = sz - (ptr - sav);
  2574. sc->en_nrx = sz / (EN_RXSZ * 1024);
  2575. if (sc->en_nrx <= 0) {
  2576. device_printf(sc->dev, "EN_NTX/EN_TXSZ/EN_RXSZ too big\n");
  2577. goto fail;
  2578. }
  2579. /*
  2580. * ensure that there is always one VC slot on the service list free
  2581. * so that we can tell the difference between a full and empty list.
  2582. */
  2583. if (sc->en_nrx >= MID_N_VC)
  2584. sc->en_nrx = MID_N_VC - 1;
  2585. for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
  2586. sc->rxslot[lcv].vcc = NULL;
  2587. midvloc = sc->rxslot[lcv].start = ptr;
  2588. ptr += (EN_RXSZ * 1024);
  2589. sz -= (EN_RXSZ * 1024);
  2590. sc->rxslot[lcv].stop = ptr;
  2591. midvloc = midvloc - MID_RAMOFF;
  2592. /* mask, cvt to words */
  2593. midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2;
  2594. /* we only want the top 11 bits */
  2595. midvloc = midvloc >> MIDV_LOCTOPSHFT;
  2596. midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
  2597. sc->rxslot[lcv].mode = midvloc |
  2598. (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;
  2599. DBG(sc, INIT, ("rx%d: start 0x%x, stop 0x%x, mode 0x%x", lcv,
  2600. sc->rxslot[lcv].start, sc->rxslot[lcv].stop,
  2601. sc->rxslot[lcv].mode));
  2602. }
  2603. device_printf(sc->dev, "%d %dKB receive buffers, %d %dKB transmit "
  2604. "buffers\n", sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
  2605. device_printf(sc->dev, "end station identifier (mac address) "
  2606. "%6D\n", IFP2IFATM(sc->ifp)->mib.esi, ":");
  2607. /*
  2608. * Start SUNI stuff. This will call our readregs/writeregs
  2609. * functions and these assume the lock to be held so we must get it
  2610. * here.
  2611. */
  2612. EN_LOCK(sc);
  2613. utopia_start(&sc->utopia);
  2614. utopia_reset(&sc->utopia);
  2615. EN_UNLOCK(sc);
  2616. /*
  2617. * final commit
  2618. */
  2619. atm_ifattach(ifp);
  2620. #ifdef ENABLE_BPF
  2621. bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc));
  2622. #endif
  2623. return (0);
  2624. fail:
  2625. en_destroy(sc);
  2626. return (-1);
  2627. }
  2628. /*
  2629. * Free all internal resources. No access to bus resources here.
  2630. * No locking required here (interrupt is already disabled).
  2631. *
  2632. * LOCK: unlocked, needed (but destroyed)
  2633. */
  2634. void
  2635. en_destroy(struct en_softc *sc)
  2636. {
  2637. u_int i;
  2638. if (sc->utopia.state & UTP_ST_ATTACHED) {
  2639. /* these assume the lock to be held */
  2640. EN_LOCK(sc);
  2641. utopia_stop(&sc->utopia);
  2642. utopia_detach(&sc->utopia);
  2643. EN_UNLOCK(sc);
  2644. }
  2645. if (sc->vccs != NULL) {
  2646. /* get rid of sticky VCCs */
  2647. for (i = 0; i < MID_N_VC; i++)
  2648. if (sc->vccs[i] != NULL)
  2649. uma_zfree(en_vcc_zone, sc->vccs[i]);
  2650. free(sc->vccs, M_DEVBUF);
  2651. }
  2652. if (sc->padbuf != NULL)
  2653. m_free(sc->padbuf);
  2654. /*
  2655. * Destroy the map zone before the tag (the fini function will
  2656. * destroy the DMA maps using the tag)
  2657. */
  2658. if (sc->map_zone != NULL)
  2659. uma_zdestroy(sc->map_zone);
  2660. if (sc->txtag != NULL)
  2661. bus_dma_tag_destroy(sc->txtag);
  2662. (void)sysctl_ctx_free(&sc->sysctl_ctx);
  2663. cv_destroy(&sc->cv_close);
  2664. mtx_destroy(&sc->en_mtx);
  2665. }
  2666. /*
  2667. * Module loaded/unloaded
  2668. */
  2669. int
  2670. en_modevent(module_t mod __unused, int event, void *arg __unused)
  2671. {
  2672. switch (event) {
  2673. case MOD_LOAD:
  2674. en_vcc_zone = uma_zcreate("EN vccs", sizeof(struct en_vcc),
  2675. NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  2676. if (en_vcc_zone == NULL)
  2677. return (ENOMEM);
  2678. break;
  2679. case MOD_UNLOAD:
  2680. uma_zdestroy(en_vcc_zone);
  2681. break;
  2682. }
  2683. return (0);
  2684. }
  2685. /*********************************************************************/
  2686. /*
  2687. * Debugging support
  2688. */
  2689. #ifdef EN_DDBHOOK
  2690. /*
  2691. * functions we can call from ddb
  2692. */
  2693. /*
  2694. * en_dump: dump the state
  2695. */
  2696. #define END_SWSL 0x00000040 /* swsl state */
  2697. #define END_DRQ 0x00000020 /* drq state */
  2698. #define END_DTQ 0x00000010 /* dtq state */
  2699. #define END_RX 0x00000008 /* rx state */
  2700. #define END_TX 0x00000004 /* tx state */
  2701. #define END_MREGS 0x00000002 /* registers */
  2702. #define END_STATS 0x00000001 /* dump stats */
  2703. #define END_BITS "\20\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
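/*
 * The level argument of en_dump() below is a mask of the END_* bits
 * above; e.g. from ddb "call en_dump(0, 3)" dumps the statistics and
 * registers of unit 0, and "call en_dump(-1, 0x7f)" dumps everything
 * on every unit.
 */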
  2704. static void
  2705. en_dump_stats(const struct en_stats *s)
  2706. {
  2707. printf("en_stats:\n");
  2708. printf("\t%d/%d mfix (%d failed)\n", s->mfixaddr, s->mfixlen,
  2709. s->mfixfail);
  2710. printf("\t%d rx dma overflow interrupts\n", s->dmaovr);
  2711. printf("\t%d times out of TX space and stalled\n", s->txoutspace);
  2712. printf("\t%d times out of DTQs\n", s->txdtqout);
  2713. printf("\t%d times launched a packet\n", s->launch);
  2714. printf("\t%d times pulled the hw service list\n", s->hwpull);
  2715. printf("\t%d times pushed a vci on the sw service list\n", s->swadd);
  2716. printf("\t%d times RX pulled an mbuf from Q that wasn't ours\n",
  2717. s->rxqnotus);
  2718. printf("\t%d times RX pulled a good mbuf from Q\n", s->rxqus);
  2719. printf("\t%d times ran out of DRQs\n", s->rxdrqout);
  2720. printf("\t%d transmit packets dropped due to mbsize\n", s->txmbovr);
  2721. printf("\t%d cells trashed due to turned off rxvc\n", s->vtrash);
  2722. printf("\t%d cells trashed due to totally full buffer\n", s->otrash);
2723. printf("\t%d cells trashed due to almost full buffer\n", s->ttrash);
  2724. printf("\t%d rx mbuf allocation failures\n", s->rxmbufout);
  2725. printf("\t%d times out of tx maps\n", s->txnomap);
  2726. #ifdef NATM
  2727. #ifdef NATM_STAT
  2728. printf("\tnatmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
  2729. natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
  2730. #endif
  2731. #endif
  2732. }
  2733. static void
  2734. en_dump_mregs(struct en_softc *sc)
  2735. {
  2736. u_int cnt;
  2737. printf("mregs:\n");
  2738. printf("resid = 0x%x\n", en_read(sc, MID_RESID));
  2739. printf("interrupt status = 0x%b\n",
  2740. (int)en_read(sc, MID_INTSTAT), MID_INTBITS);
  2741. printf("interrupt enable = 0x%b\n",
  2742. (int)en_read(sc, MID_INTENA), MID_INTBITS);
  2743. printf("mcsr = 0x%b\n", (int)en_read(sc, MID_MAST_CSR), MID_MCSRBITS);
  2744. printf("serv_write = [chip=%u] [us=%u]\n", en_read(sc, MID_SERV_WRITE),
  2745. MID_SL_A2REG(sc->hwslistp));
  2746. printf("dma addr = 0x%x\n", en_read(sc, MID_DMA_ADDR));
  2747. printf("DRQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
  2748. MID_DRQ_REG2A(en_read(sc, MID_DMA_RDRX)),
  2749. MID_DRQ_REG2A(en_read(sc, MID_DMA_WRRX)), sc->drq_chip, sc->drq_us);
  2750. printf("DTQ: chip[rd=0x%x,wr=0x%x], sc[chip=0x%x,us=0x%x]\n",
  2751. MID_DTQ_REG2A(en_read(sc, MID_DMA_RDTX)),
  2752. MID_DTQ_REG2A(en_read(sc, MID_DMA_WRTX)), sc->dtq_chip, sc->dtq_us);
2753. printf(" unusual txspeeds:");
  2754. for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
2755. if (sc->vccs[cnt] != NULL && sc->vccs[cnt]->txspeed)
  2756. printf(" vci%d=0x%x", cnt, sc->vccs[cnt]->txspeed);
  2757. printf("\n");
  2758. printf(" rxvc slot mappings:");
  2759. for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
2760. if (sc->vccs[cnt] != NULL && sc->vccs[cnt]->rxslot != NULL)
  2761. printf(" %d->%td", cnt,
  2762. sc->vccs[cnt]->rxslot - sc->rxslot);
  2763. printf("\n");
  2764. }
  2765. static void
  2766. en_dump_tx(struct en_softc *sc)
  2767. {
  2768. u_int slot;
  2769. printf("tx:\n");
  2770. for (slot = 0 ; slot < EN_NTX; slot++) {
  2771. printf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d] ", slot,
  2772. sc->txslot[slot].start, sc->txslot[slot].stop,
  2773. sc->txslot[slot].cur,
  2774. (sc->txslot[slot].cur - sc->txslot[slot].start) / 4);
  2775. printf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
  2776. sc->txslot[slot].bfree);
  2777. printf("txhw: base_address=0x%x, size=%u, read=%u, "
  2778. "descstart=%u\n",
  2779. (u_int)MIDX_BASE(en_read(sc, MIDX_PLACE(slot))),
  2780. MIDX_SZ(en_read(sc, MIDX_PLACE(slot))),
  2781. en_read(sc, MIDX_READPTR(slot)),
  2782. en_read(sc, MIDX_DESCSTART(slot)));
  2783. }
  2784. }
  2785. static void
  2786. en_dump_rx(struct en_softc *sc)
  2787. {
  2788. struct en_rxslot *slot;
  2789. printf(" recv slots:\n");
  2790. for (slot = sc->rxslot ; slot < &sc->rxslot[sc->en_nrx]; slot++) {
  2791. printf("rx%td: start/stop/cur=0x%x/0x%x/0x%x mode=0x%x ",
  2792. slot - sc->rxslot, slot->start, slot->stop, slot->cur,
  2793. slot->mode);
  2794. if (slot->vcc != NULL) {
  2795. printf("vci=%u\n", slot->vcc->vcc.vci);
  2796. printf("RXHW: mode=0x%x, DST_RP=0x%x, WP_ST_CNT=0x%x\n",
  2797. en_read(sc, MID_VC(slot->vcc->vcc.vci)),
  2798. en_read(sc, MID_DST_RP(slot->vcc->vcc.vci)),
  2799. en_read(sc, MID_WP_ST_CNT(slot->vcc->vcc.vci)));
  2800. }
  2801. }
  2802. }
  2803. /*
  2804. * This is only correct for non-adaptec adapters
  2805. */
  2806. static void
  2807. en_dump_dtqs(struct en_softc *sc)
  2808. {
  2809. uint32_t ptr, reg;
  2810. printf(" dtq [need_dtqs=%d,dtq_free=%d]:\n", sc->need_dtqs,
  2811. sc->dtq_free);
  2812. ptr = sc->dtq_chip;
  2813. while (ptr != sc->dtq_us) {
  2814. reg = en_read(sc, ptr);
  2815. printf("\t0x%x=[%#x cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
  2816. sc->dtq[MID_DTQ_A2REG(ptr)], reg, MID_DMA_CNT(reg),
  2817. MID_DMA_TXCHAN(reg), (reg & MID_DMA_END) != 0,
  2818. MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
  2819. EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
  2820. }
  2821. }
  2822. static void
  2823. en_dump_drqs(struct en_softc *sc)
  2824. {
  2825. uint32_t ptr, reg;
  2826. printf(" drq [need_drqs=%d,drq_free=%d]:\n", sc->need_drqs,
  2827. sc->drq_free);
  2828. ptr = sc->drq_chip;
  2829. while (ptr != sc->drq_us) {
  2830. reg = en_read(sc, ptr);
  2831. printf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%x]\n",
  2832. sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg),
  2833. MID_DMA_RXVCI(reg), (reg & MID_DMA_END) != 0,
  2834. MID_DMA_TYPE(reg), en_read(sc, ptr + 4));
  2835. EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
  2836. }
  2837. }
  2838. /* Do not staticize - meant for calling from DDB! */
  2839. int
  2840. en_dump(int unit, int level)
  2841. {
  2842. struct en_softc *sc;
  2843. int lcv, cnt;
  2844. devclass_t dc;
  2845. int maxunit;
  2846. dc = devclass_find("en");
  2847. if (dc == NULL) {
  2848. printf("%s: can't find devclass!\n", __func__);
  2849. return (0);
  2850. }
  2851. maxunit = devclass_get_maxunit(dc);
  2852. for (lcv = 0 ; lcv < maxunit ; lcv++) {
  2853. sc = devclass_get_softc(dc, lcv);
  2854. if (sc == NULL)
  2855. continue;
  2856. if (unit != -1 && unit != lcv)
  2857. continue;
  2858. device_printf(sc->dev, "dumping device at level 0x%b\n",
  2859. level, END_BITS);
  2860. if (sc->dtq_us == 0) {
  2861. printf("<hasn't been en_init'd yet>\n");
  2862. continue;
  2863. }
  2864. if (level & END_STATS)
  2865. en_dump_stats(&sc->stats);
  2866. if (level & END_MREGS)
  2867. en_dump_mregs(sc);
  2868. if (level & END_TX)
  2869. en_dump_tx(sc);
  2870. if (level & END_RX)
  2871. en_dump_rx(sc);
  2872. if (level & END_DTQ)
  2873. en_dump_dtqs(sc);
  2874. if (level & END_DRQ)
  2875. en_dump_drqs(sc);
  2876. if (level & END_SWSL) {
  2877. printf(" swslist [size=%d]: ", sc->swsl_size);
  2878. for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ;
  2879. cnt = (cnt + 1) % MID_SL_N)
  2880. printf("0x%x ", sc->swslist[cnt]);
  2881. printf("\n");
  2882. }
  2883. }
  2884. return (0);
  2885. }
  2886. /*
  2887. * en_dumpmem: dump the memory
  2888. *
  2889. * Do not staticize - meant for calling from DDB!
  2890. */
  2891. int
  2892. en_dumpmem(int unit, int addr, int len)
  2893. {
  2894. struct en_softc *sc;
  2895. uint32_t reg;
  2896. devclass_t dc;
  2897. dc = devclass_find("en");
  2898. if (dc == NULL) {
  2899. printf("%s: can't find devclass\n", __func__);
  2900. return (0);
  2901. }
  2902. sc = devclass_get_softc(dc, unit);
  2903. if (sc == NULL) {
  2904. printf("%s: invalid unit number: %d\n", __func__, unit);
  2905. return (0);
  2906. }
  2907. addr = addr & ~3;
  2908. if (addr < MID_RAMOFF || addr + len * 4 > MID_MAXOFF || len <= 0) {
  2909. printf("invalid addr/len number: %d, %d\n", addr, len);
  2910. return (0);
  2911. }
  2912. printf("dumping %d words starting at offset 0x%x\n", len, addr);
  2913. while (len--) {
  2914. reg = en_read(sc, addr);
  2915. printf("mem[0x%x] = 0x%x\n", addr, reg);
  2916. addr += 4;
  2917. }
  2918. return (0);
  2919. }
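/*
 * Example: "call en_dumpmem(0, 0x200000, 8)" from ddb prints the first
 * eight words of unit 0's on-board RAM (RAM starts at MID_RAMOFF,
 * which is offset 0x200000).
 */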
  2920. #endif